query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (sequence, lengths 19-20) | metadata (dict) |
---|---|---|---|
The collection of associated Private Endpoint Connections. | def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionDataModelResponse']:
return pulumi.get(self, "private_endpoint_connections") | [
"def get_connections(self):\n return self.connections",
"def list_connections(self):\n path = self.build_url(\"/connections\")\n return self.request('get', path)",
"def endpoints(self) -> object:\n return self._endpoints",
"def list_connections(self):\n\t\treturn dict((conn.connection_id,\n\t\t [node.path for node in conn.fs_nodes])\n\t\t for conn\n\t\t in self.server.connections)",
"def connections(self):\n return self.inboundConnections.values() + self.outboundConnections.values()",
"def get_all(self):\n\t\treturn self.all_connections",
"def list_connections(self):\n url = self._get_management_url(\"connections\")\n conns = self._call_management(url)\n\n return conns",
"def get_connected_endpoints_and_subscriptions(\n self\n ) -> Tuple[Tuple[str, Set[Type[BaseEvent]]], ...]:\n ...",
"def get_connections_list() -> list[models.DatabaseConnection]:\n\n return list(get_connections_map().values()) or []",
"def connections(self):\n if self._connections is None:\n # get connection pairs\n w = 10 if self.width == 24 else 11\n conn = [(anode, cathode) for cathode in range(12) for anode in [a for a in range(12) if a!= cathode][:w]]\n # arrange connection pairs in coordinate grid\n col_height, cols = (5, 24) if self.width == 24 else (11, 12)\n self._connections = [conn[col_height*i:col_height*i+col_height] for i in range(cols)]\n return self._connections",
"def _layer_connections(self):\n return [[j for j in [i-1,i+1] if 0<=j<self.num_layers]\n for i in range(self.num_layers)]",
"def get_connections(self, scheme=None):\n cs = []\n for cr in self._conns:\n if scheme is None or (scheme and scheme == cr.scheme):\n c.append(cr.conn)\n return cs",
"def getConnections(self):\n self.gLogging.debug(\"getConnections invoked\")\n try:\n if len(self.connections) > 0:\n connected = [x for x, y in self.connections]\n lines = self.gHosts.pickHosts(_printing=False)\n for line in lines:\n if 'group' in line:\n #group = gutils.trim_ansi(line).split('id')[0].split(\":\")[1].strip()\n group = \"id\".join(gutils.trim_ansi(line).split('id')[:-1]).split(\":\")[1].strip()\n if 'host' in line:\n #line must be cleaned up from ansi escape sequences\n host = \"id\".join(gutils.trim_ansi(line).split('id')[:-1]).split(\":\")[1].strip()\n if host in connected:\n details = self.gHosts.searchHostName(host)[0]\n print(\"\\t\" + host, gutils.color_pick(self.gConfig, '[connected, ip: {}, port: {}]'.format(details['host'], details['port']), self.gConfig['JSON']['pick_yes']))\n else:\n print(\"\\t\" + host, gutils.color_pick(self.gConfig, '[no connected]', self.gConfig['JSON']['pick_no']))\n else:\n self.gLogging.show(\"there is no active connection\")\n except Exception:\n self.gLogging.error(\"cannot get connections list\")",
"def get_fully_qualified_port_connections(self):\n namespace = self.component.get_node_addr()\n conns = []\n for src, sink in self.component.portconnections:\n src_new = namespace.get_subns_addr(src)\n sink_new = namespace.get_subns_addr(sink)\n conns.append((src_new, sink_new))\n return conns",
"def getConnections(self, toEdge):\n return self._outgoing.get(toEdge, [])",
"def establishedConnections(self):\n return [\n x for x in self.connections() if x.fullyEstablished]",
"def outlets(self) -> Outlet:\n return self._outlets",
"def get_all_connections(self, channel):\r\n channel = to_object(channel, objtype='channel')\r\n return self.filter(db_channel=channel)",
"def get_connected_endpoints_and_subscriptions(\n self\n ) -> Tuple[Tuple[str, Set[Type[BaseEvent]]], ...]:\n return ((self.name, self.get_subscribed_events()),) + tuple(\n (remote.name, remote.get_subscribed_events())\n for remote in self._connections\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Guid id of the private link scope. | def private_link_scope_id(self) -> str:
return pulumi.get(self, "private_link_scope_id") | [
"def GUID(self) -> _n_2_t_0:",
"def block_id(self):\n return str(self.scope_ids.usage_id)",
"def access_group_id(self) -> str:\n return pulumi.get(self, \"access_group_id\")",
"def url_id(self):\n return self.id",
"def scope_map_id(self) -> Optional[str]:\n return pulumi.get(self, \"scope_map_id\")",
"def get_external_id(self):\n pass",
"def unique_id(self):\n return self.id",
"def get_visible_name_from_scope(cls, scope):\n return ('%s-%s' % (scope.uuid.hex, scope.name))[:64]",
"def getGrantId(self): \n return self.grantId",
"def access_keyid(self):\n return self._access_keyid",
"def as_global_id(self) -> str:\n return self.prefix + str(self.id)",
"def public_id(self):\n return modhex(pack('>I', self.id))",
"def id(cls) -> Global:\n return Global.current_application_id()",
"def generation_id(self):\n ret = self._get_attr(\"generationId\")\n return ret",
"def bot_id(self):\n return self._bot_id",
"def account_id(self): # DG: renamed\n pass",
"def id(self) -> int:\n return self._context.id",
"def UniqueId(self) -> str:",
"def make_scoped_device_id(idval: str, scope: str) -> str:\n if scope == \"network\":\n return idval\n else:\n return get_gateway_hwid() + \":\" + idval",
"def private_link(self) -> Optional[pulumi.Input['FrontdoorOriginPrivateLinkArgs']]:\n return pulumi.get(self, \"private_link\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The country or region where the resource is located. | def country_or_region(self) -> Optional[str]:
return pulumi.get(self, "country_or_region") | [
"def this_region(self):\n _logger.debug('%s', where_am_i())\n if self._metadata is None:\n _logger.warning('metadata is None !')\n # TODO: should it severe error case instead ??\n return None\n try:\n return self._metadata['instance']['region']\n except Exception as e:\n _logger.warning('No region information in metadata: %s', str(e))\n return None",
"def get_region(self):\n try:\n return self.meta_data['placement'][\n 'availability-zone'][:-1].strip()\n except KeyError:\n raise IcsMetaException(\n \"Cannot find the 'region info' in meta-data.\")",
"def region(self):\n return self._unit.received[\"region\"]",
"def ex_get_region(self, name):\r\n if name.startswith('https://'):\r\n short_name = self._get_components_from_path(name)['name']\r\n request = name\r\n else:\r\n short_name = name\r\n request = '/regions/%s' % (name)\r\n # Check region cache first\r\n if short_name in self.region_dict:\r\n return self.region_dict[short_name]\r\n # Otherwise, look up region information\r\n response = self.connection.request(request, method='GET').object\r\n return self._to_region(response)",
"def contry_code(self):\n return self._data.get('profile', {}).get('countryCode')",
"def country_name(self, ip_address):\n return self.country(ip_address).get('country_name')",
"def _usa_state(self, place: str):\n country = 'United States'\n region = place.split('(')[-1].strip(' ()').title()\n\n return country, region",
"def country_name(self):\n if self.country:\n if hasattr(self.country, 'common_name'):\n return self.country.common_name\n return self.country.name\n return None",
"def resource_location(self):\n if \"resourceLocation\" in self._prop_dict:\n return self._prop_dict[\"resourceLocation\"]\n else:\n return None",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def get_country(self):\r\n if len(self.user_flag) > 0:\r\n country = self.user_flag[0].get(\"title\", \"\")\r\n else:\r\n country = \"\"\r\n return country",
"def country(self):\n return Country(alpha_2=self.country_code)",
"def country_code(self, ip_address):\n return self.country(ip_address).get('country_code')",
"def get_locationAreaCode(self):\n return self._lac",
"def ec2_current_region() -> Optional[str]:\n cfg = ec2_metadata()\n if cfg is None:\n return None\n return cfg.get(\"region\", None)",
"def _AutoDetectRegion(self):\n def _GetRegionContext(unused_object_type, context):\n if self._flags.region:\n return self.DenormalizeResourceName(self._flags.region)\n return self.GetRegionForResource(self.api.addresses,\n context['address'])\n\n self._context_parser.context_prompt_fxns['region'] = _GetRegionContext",
"def country_of_origin(self):\n if self._country_of_origin_id is None:\n self._reload()\n return self._country_of_origin_id",
"def get_default_region(self):\r\n return self._default_region",
"def location(self):\n return self.patient.get('location', None)",
"def region(project):\n return_code, location = common.execute(\n 'gcloud app describe --project={project} '\n '--format=\"value(locationId)\"'.format(project=project))\n if return_code:\n raise RuntimeError('Could not get App Engine region')\n\n return region_from_location(location.strip().decode('utf-8'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The district, state, or province where the resource is located. | def district(self) -> Optional[str]:
return pulumi.get(self, "district") | [
"def state_or_province(self):\n return self._state_or_province",
"def location(self):\n return self.patient.get('location', None)",
"def district_or_county(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"district_or_county\")",
"def get_district(self):\n for p in self.overview['positions']:\n if not p['In Congress']['end']:\n try:\n return p['District']\n except KeyError:\n return None\n return None",
"def location(self):\n return '{}, {}'.format(\n self.city,\n self.state.abbr,\n )",
"def location(self):\n return self._redunda.location",
"def this_region(self):\n _logger.debug('%s', where_am_i())\n if self._metadata is None:\n _logger.warning('metadata is None !')\n # TODO: should it severe error case instead ??\n return None\n try:\n return self._metadata['instance']['region']\n except Exception as e:\n _logger.warning('No region information in metadata: %s', str(e))\n return None",
"def resource_location(self):\n if \"resourceLocation\" in self._prop_dict:\n return self._prop_dict[\"resourceLocation\"]\n else:\n return None",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def getAncestorLand(self):\n try:\n if self.checkRelatedLocations():\n s = requests.get(self.__data['relatedLocations']['primaryLocations'][0]['links']['self']['href'], headers=getHeaders())\n data = json.loads(s.content)\n\n return data['links']['ancestorLand']['title']\n else:\n return None\n except:\n return None",
"def location_details(self):\n return self._location_details",
"def getCity(self):\n return self._city",
"def getFireDistrict(self):\n p = Selector(text=self.content).xpath('//*[@id=\"MainContent_lblUf05\"]/text()')\n try:\n firedistrict = p.extract()[0]\n except IndexError:\n print(\"No Fire District is available for %s\" % self.getLocation())\n return \"\"\n return firedistrict",
"def municipality(self):\n return self.generator.parse(\"{{city}} kommune\")",
"def get_region(self):\n try:\n return self.meta_data['placement'][\n 'availability-zone'][:-1].strip()\n except KeyError:\n raise IcsMetaException(\n \"Cannot find the 'region info' in meta-data.\")",
"def city(self) -> str:\n _city: str = self.seq_df[\"CITY_NAME\"].values[0]\n return _city",
"def get_geo_location_desc(self):\n if self.ascender_data and 'geo_location_desc' in self.ascender_data:\n return self.ascender_data['geo_location_desc']\n return ''",
"def region(self):\n return self._unit.received[\"region\"]",
"def Location(self) -> str:",
"def get_home_town(self):\r\n return self._home_town"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The machine extension instance view. | def instance_view(self) -> Optional['outputs.MachineExtensionInstanceViewResponse']:
return pulumi.get(self, "instance_view") | [
"def instance_view(self) -> Optional['outputs.VirtualMachineExtensionInstanceViewResponse']:\n return pulumi.get(self, \"instance_view\")",
"def inspect_view(self, request, instance_pk):\n kwargs = {\"model_admin\": self, \"instance_pk\": instance_pk}\n view_class = self.inspect_view_class\n return view_class.as_view(**kwargs)(request)",
"def make_viso_instance(self):\n pass",
"def virtual_machine_template(self):\n return self._virtual_machine_template",
"def instance(self):\n return self.__es",
"def model_instance(self) -> any:\n pass",
"def custom_view(self) -> discord.ui.View | None:\n return self._custom_view",
"def extensions(self) -> Optional[Sequence['outputs.MachineExtensionInstanceViewResponse']]:\n return pulumi.get(self, \"extensions\")",
"def create_workview(self):\r\n\r\n self.workview = WorkView()\r\n\r\n return self.workview",
"def _get_vm_instance_views(self, vm_index, vm, sub_index, sub):\n vm_name = vm.get('name')\n _log.info('Working on VM #%d: %s; %s', vm_index, vm_name,\n util.outline_az_sub(sub_index, sub, self._tenant))\n try:\n creds = self._credentials\n sub_id = sub.get('subscription_id')\n compute_client = ComputeManagementClient(creds, sub_id)\n vm_id = vm.get('id')\n rg_name = tools.parse_resource_id(vm_id)['resource_group']\n vm_iv = compute_client.virtual_machines.instance_view(rg_name,\n vm_name)\n vm_iv = vm_iv.as_dict()\n yield _process_vm_instance_view(vm_index, vm, vm_iv,\n sub_index, sub, self._tenant)\n except Exception as e:\n _log.error('Failed to fetch vm_instance_view for VM #%d: '\n '%s; %s; error: %s: %s', vm_index, vm_name,\n util.outline_az_sub(sub_index, sub, self._tenant),\n type(e).__name__, e)",
"def vm(self):\n ret = self._get_attr(\"VM\")\n return ret",
"def describe(self):\n print(Controller().describe_instances())",
"def view(self,) -> pn.pane.HTML:\r\n return pn.pane.HTML(self.__html__())",
"def _create_motd_model_view(self):\n \n # Only do imports when you need to! This makes sure that the import\n # only happens when somebody needs the motd attribute.\n from motd.model.i_motd import IMOTD\n from motd.ui.motd_model_view import MOTDModelView\n \n # ask the application for an IMOTD instance\n motd = self.application.get_service(IMOTD)\n \n motd_model_view = MOTDModelView(model=motd)\n return motd_model_view",
"def aovCollectionInstance(self):\n \n pass",
"def create_view(self, view_class, additional_context=None):\n additional_context = additional_context or {}\n view = view_class(self.widgetastic_browser, additional_context=additional_context)\n return view",
"def PLUGIN_ENTRY(): # pylint: disable=invalid-name\n return BapView()",
"def as_view(cls, **initkwargs):\n return super().as_view(**initkwargs)",
"def machine(self):\n ret = self._get_attr(\"machine\")\n return IMachine(ret)",
"def _get_mst_instances(self):\n return self.__mst_instances"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the assessment mode. | def assessment_mode(self) -> Optional[str]:
return pulumi.get(self, "assessment_mode") | [
"def set_mode(self, mode):\n self._mode = mode",
"def set_ai(self):\n difficulty = self.difficulty()\n if difficulty == \"easy\":\n self._ai = EasyMode()\n elif difficulty == \"medium\":\n self._ai = MediumMode()\n elif difficulty == \"hard\":\n self._ai = HardMode()",
"def _set_mode(self, mode):\n self._parent._write(self, DacBase._COMMAND_SET_SLOT_MODE.format(mode))\n self._mode = mode",
"def trial_mode(self, trial_mode):\n\n self._trial_mode = trial_mode",
"def _change_mode(self, attr, old, new):\n self.exg_mode = new",
"def set_immersive_mode(self):\n if self.device.get_device_android_version().major >= 11:\n logw(\"immersive mode not available on Android 11+ devices\")\n return\n logi(\"setting immersive mode\")\n self.device.execute_command(f\"settings put global policy_control immersive.full={self.package_name}\", shell=True)\\\n .validate(Exception(\"error setting immersive mode\"))",
"def detected_mode_set(self, event):\n self.mode.set(2)\n self.change_mode()",
"def setMode( self, aMode ):\n if ( aMode != 0 ) and ( aMode != 1 ):\n raise VibroP_GraphCorrupted( \"ERROR: a wrong mode was set\" )\n else:\n self.__Mode = aMode",
"def set_engagement_mode():\n mode = request.args.get('mode', type=str)\n print(mode)\n\n ba_srv.setEngagementMode(mode)\n\n return {\n \"status\": \"ok\",\n \"mode\": mode\n }",
"def set_training_mode(self, mode: bool) -> None:\n self.actor.set_training_mode(mode)\n self.critic.set_training_mode(mode)\n self.training = mode",
"def set_mode(self, mode):\n self.mode = mode\n if mode == \"train\" or mode is True:\n self.models.train()\n elif mode in [\"val\", \"test\", \"eval\"] or mode is False:\n self.models.eval()\n else:\n raise ValueError(f\"Invalid model mode `{mode}`!\")",
"def set_mode(mode):\n master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')\n master.wait_heartbeat()\n\n mode_id = master.mode_mapping()[mode]\n master.mav.set_mode_send(\n master.target_system,\n mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,\n mode_id)\n print(\"Mode \" + mode + \" successfully set.\")\n return True",
"def device_execution_mode(self, value):\n\n self._device_execution_mode.set(value)",
"def captured_mode_set(self, event):\n self.mode.set(1)\n self.change_mode()",
"def __try_set_auto_assessment_mode(self):\n try:\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING_AUTO_ASSESSMENT)\n self.composite_logger.log_debug(\"Systemd information: {0}\".format(str(self.auto_assess_service_manager.get_version()))) # proactive support telemetry\n\n if self.execution_config.assessment_mode is None:\n self.composite_logger.log_debug(\"No assessment mode config was present. No configuration changes will occur.\")\n elif self.execution_config.assessment_mode == Constants.AssessmentModes.AUTOMATIC_BY_PLATFORM:\n self.composite_logger.log_debug(\"Enabling platform-based automatic assessment.\")\n if not self.auto_assess_service_manager.systemd_exists():\n raise Exception(\"Systemd is not available on this system, and platform-based auto-assessment cannot be configured.\")\n self.auto_assess_service_manager.create_and_set_service_idem()\n self.auto_assess_timer_manager.create_and_set_timer_idem()\n self.current_auto_assessment_state = Constants.AutoAssessmentStates.ENABLED\n elif self.execution_config.assessment_mode == Constants.AssessmentModes.IMAGE_DEFAULT:\n self.composite_logger.log_debug(\"Disabling platform-based automatic assessment.\")\n self.auto_assess_timer_manager.remove_timer()\n self.auto_assess_service_manager.remove_service()\n self.current_auto_assessment_state = Constants.AutoAssessmentStates.DISABLED\n else:\n raise Exception(\"Unknown assessment mode specified. [AssessmentMode={0}]\".format(self.execution_config.assessment_mode))\n\n self.__report_consolidated_configure_patch_status()\n self.composite_logger.log_debug(\"Completed processing automatic assessment mode configuration.\")\n except Exception as error:\n # deliberately not setting self.configure_patching_exception_error here as it does not feed into the parent object. Not a bug, if you're thinking about it.\n self.composite_logger.log_error(\"Error while processing automatic assessment mode configuration. [Error={0}]\".format(repr(error)))\n self.__report_consolidated_configure_patch_status(status=Constants.STATUS_TRANSITIONING, error=error)\n self.configure_patching_successful &= False\n\n # revert operation back to parent\n self.composite_logger.log_debug(\"Restoring status handler operation to {0}.\".format(Constants.CONFIGURE_PATCHING))\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING)",
"def set_mode(self, nt):\n return _radio_astro_swig.detect_set_mode(self, nt)",
"def setAccessMode(self, mode): \n self.__accessMode = mode",
"def experiment_mode(self):\n return self._experiment_mode",
"def config_mode(self):\n\n pass",
"def set_edid_mode(self, mode: str):\n if mode not in ['port1', 'remix', 'default']:\n _LOGGER.error(\"Bad EDID mode\")\n return\n result = self._avior.set_edid_mode(mode)\n if \"OK\" in result:\n pass\n else:\n _LOGGER.error(\"Set EDID mode error: {}\".format(result))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the patch mode. | def patch_mode(self) -> Optional[str]:
return pulumi.get(self, "patch_mode") | [
"def set_mode(self, mode):\n self._mode = mode",
"def set_reference_mode(self, mode='ext'):\n if mode == 'ext':\n self.write(\"FMOD 0\")\n else:\n self.write(\"FMOD 1\")",
"def _change_mode(self, attr, old, new):\n self.exg_mode = new",
"def patch(self, patch):\n if patch is None:\n raise ValueError(\"Invalid value for `patch`, must not be `None`\") # noqa: E501\n\n self._patch = patch",
"def __try_set_patch_mode(self):\n try:\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING)\n self.current_auto_os_patch_state = self.package_manager.get_current_auto_os_patch_state()\n\n # disable auto OS updates if VM is configured for platform updates only.\n # NOTE: this condition will be false for Assessment operations, since patchMode is not sent in the API request\n if self.current_auto_os_patch_state != Constants.AutomaticOSPatchStates.DISABLED and self.execution_config.patch_mode == Constants.PatchModes.AUTOMATIC_BY_PLATFORM:\n self.package_manager.disable_auto_os_update()\n\n self.current_auto_os_patch_state = self.package_manager.get_current_auto_os_patch_state()\n\n if self.execution_config.patch_mode == Constants.PatchModes.AUTOMATIC_BY_PLATFORM and self.current_auto_os_patch_state == Constants.AutomaticOSPatchStates.UNKNOWN:\n # NOTE: only sending details in error objects for customer visibility on why patch state is unknown, overall configurepatching status will remain successful\n self.configure_patching_exception_error = \"Could not disable one or more automatic OS update services. Please check if they are configured correctly\"\n\n self.composite_logger.log_debug(\"Completed processing patch mode configuration.\")\n except Exception as error:\n self.composite_logger.log_error(\"Error while processing patch mode configuration. [Error={0}]\".format(repr(error)))\n self.configure_patching_exception_error = error\n self.configure_patching_successful &= False",
"def set_mode_prop( self ):\n self.__is_text_file = eolfix_callback_prop\n self.__is_text_file_params = {}",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f\"patch -p1 < {self.project.patch}/{self.ver}/{patch}\")",
"def set_mode(self, nt):\n return _radio_astro_swig.detect_set_mode(self, nt)",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f'patch -p1 < {self.project.patch}/{self.ver}/{patch}')",
"def set_mode_prop(self):\n self.__is_text_file = eolfix_callback_prop\n self.__is_text_file_params = {}",
"def config_mode(self):\n\n pass",
"def setOperationMode(self):\n\n mode = ''\n if self.ui.checkEditNone.isChecked():\n mode = 'normal'\n elif self.ui.checkEditBuildPoints.isChecked():\n mode = 'build'\n elif self.ui.checkEditHorizonMask.isChecked():\n mode = 'horizon'\n elif self.ui.checkPolarAlignment.isChecked():\n mode = 'star'\n\n # styles\n if self.horizonMarker is not None:\n self.horizonMarker.set_marker(self.MODE[mode]['horMarker'])\n self.horizonMarker.set_color(self.MODE[mode]['horColor'])\n if self.pointsBuild is not None:\n self.pointsBuild.set_color(self.MODE[mode]['buildPColor'])\n if self.starsAlign is not None:\n # self.starsAlignAnnotate.set_color(self.MODE[mode]['horMarker'])\n self.starsAlign.set_color(self.MODE[mode]['starColor'])\n\n self.drawCanvas()\n return True",
"def set_mode(self, mode):\n self._write_byte(BNO055_OPR_MODE_ADDR, mode & 0xFF)\n # Delay for 30 milliseconds (datsheet recommends 19ms, but a little more\n # can't hurt and the kernel is going to spend some unknown amount of time\n # too).\n time.sleep(0.03)",
"def setModeAttributes(self) -> None:\n d = self.attributesDict\n aList = (\n ('default', 'null'),\n ('digit_re', ''),\n ('escape', ''), # New in Leo 4.4.2.\n ('highlight_digits', True),\n ('ignore_case', True),\n ('no_word_sep', ''),\n )\n for key, default in aList:\n val = d.get(key, default)\n if val in ('true', 'True'):\n val = True\n if val in ('false', 'False'):\n val = False\n setattr(self, key, val)",
"def set_mode(self, mode = \"CHP\"):\n return self.echo(\":INIT:\" + mode)",
"def set_tracking_mode(self, mode):\n self._send_command_and_validate_response('T' + chr(mode))",
"def getPatch(self) -> int:\n ...",
"def update_mode(self):\n self.__mode = self.__exp_mode | self.__exp_out_mode\n pvc.set_exp_modes(self.__handle, self.__mode)",
"def CmdPkgPatch(package, options):\n package.Patch()",
"def set_operation_mode(self, operation_mode: str) -> None:\n raise NotImplementedError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the assessment mode. | def assessment_mode(self) -> Optional[str]:
return pulumi.get(self, "assessment_mode") | [
"def set_mode(self, mode):\n self._mode = mode",
"def set_ai(self):\n difficulty = self.difficulty()\n if difficulty == \"easy\":\n self._ai = EasyMode()\n elif difficulty == \"medium\":\n self._ai = MediumMode()\n elif difficulty == \"hard\":\n self._ai = HardMode()",
"def _set_mode(self, mode):\n self._parent._write(self, DacBase._COMMAND_SET_SLOT_MODE.format(mode))\n self._mode = mode",
"def trial_mode(self, trial_mode):\n\n self._trial_mode = trial_mode",
"def _change_mode(self, attr, old, new):\n self.exg_mode = new",
"def set_immersive_mode(self):\n if self.device.get_device_android_version().major >= 11:\n logw(\"immersive mode not available on Android 11+ devices\")\n return\n logi(\"setting immersive mode\")\n self.device.execute_command(f\"settings put global policy_control immersive.full={self.package_name}\", shell=True)\\\n .validate(Exception(\"error setting immersive mode\"))",
"def detected_mode_set(self, event):\n self.mode.set(2)\n self.change_mode()",
"def setMode( self, aMode ):\n if ( aMode != 0 ) and ( aMode != 1 ):\n raise VibroP_GraphCorrupted( \"ERROR: a wrong mode was set\" )\n else:\n self.__Mode = aMode",
"def set_engagement_mode():\n mode = request.args.get('mode', type=str)\n print(mode)\n\n ba_srv.setEngagementMode(mode)\n\n return {\n \"status\": \"ok\",\n \"mode\": mode\n }",
"def set_training_mode(self, mode: bool) -> None:\n self.actor.set_training_mode(mode)\n self.critic.set_training_mode(mode)\n self.training = mode",
"def set_mode(self, mode):\n self.mode = mode\n if mode == \"train\" or mode is True:\n self.models.train()\n elif mode in [\"val\", \"test\", \"eval\"] or mode is False:\n self.models.eval()\n else:\n raise ValueError(f\"Invalid model mode `{mode}`!\")",
"def set_mode(mode):\n master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')\n master.wait_heartbeat()\n\n mode_id = master.mode_mapping()[mode]\n master.mav.set_mode_send(\n master.target_system,\n mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,\n mode_id)\n print(\"Mode \" + mode + \" successfully set.\")\n return True",
"def device_execution_mode(self, value):\n\n self._device_execution_mode.set(value)",
"def captured_mode_set(self, event):\n self.mode.set(1)\n self.change_mode()",
"def __try_set_auto_assessment_mode(self):\n try:\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING_AUTO_ASSESSMENT)\n self.composite_logger.log_debug(\"Systemd information: {0}\".format(str(self.auto_assess_service_manager.get_version()))) # proactive support telemetry\n\n if self.execution_config.assessment_mode is None:\n self.composite_logger.log_debug(\"No assessment mode config was present. No configuration changes will occur.\")\n elif self.execution_config.assessment_mode == Constants.AssessmentModes.AUTOMATIC_BY_PLATFORM:\n self.composite_logger.log_debug(\"Enabling platform-based automatic assessment.\")\n if not self.auto_assess_service_manager.systemd_exists():\n raise Exception(\"Systemd is not available on this system, and platform-based auto-assessment cannot be configured.\")\n self.auto_assess_service_manager.create_and_set_service_idem()\n self.auto_assess_timer_manager.create_and_set_timer_idem()\n self.current_auto_assessment_state = Constants.AutoAssessmentStates.ENABLED\n elif self.execution_config.assessment_mode == Constants.AssessmentModes.IMAGE_DEFAULT:\n self.composite_logger.log_debug(\"Disabling platform-based automatic assessment.\")\n self.auto_assess_timer_manager.remove_timer()\n self.auto_assess_service_manager.remove_service()\n self.current_auto_assessment_state = Constants.AutoAssessmentStates.DISABLED\n else:\n raise Exception(\"Unknown assessment mode specified. [AssessmentMode={0}]\".format(self.execution_config.assessment_mode))\n\n self.__report_consolidated_configure_patch_status()\n self.composite_logger.log_debug(\"Completed processing automatic assessment mode configuration.\")\n except Exception as error:\n # deliberately not setting self.configure_patching_exception_error here as it does not feed into the parent object. Not a bug, if you're thinking about it.\n self.composite_logger.log_error(\"Error while processing automatic assessment mode configuration. [Error={0}]\".format(repr(error)))\n self.__report_consolidated_configure_patch_status(status=Constants.STATUS_TRANSITIONING, error=error)\n self.configure_patching_successful &= False\n\n # revert operation back to parent\n self.composite_logger.log_debug(\"Restoring status handler operation to {0}.\".format(Constants.CONFIGURE_PATCHING))\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING)",
"def set_mode(self, nt):\n return _radio_astro_swig.detect_set_mode(self, nt)",
"def setAccessMode(self, mode): \n self.__accessMode = mode",
"def experiment_mode(self):\n return self._experiment_mode",
"def config_mode(self):\n\n pass",
"def set_edid_mode(self, mode: str):\n if mode not in ['port1', 'remix', 'default']:\n _LOGGER.error(\"Bad EDID mode\")\n return\n result = self._avior.set_edid_mode(mode)\n if \"OK\" in result:\n pass\n else:\n _LOGGER.error(\"Set EDID mode error: {}\".format(result))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specifies the patch mode. | def patch_mode(self) -> Optional[str]:
return pulumi.get(self, "patch_mode") | [
"def set_mode(self, mode):\n self._mode = mode",
"def set_reference_mode(self, mode='ext'):\n if mode == 'ext':\n self.write(\"FMOD 0\")\n else:\n self.write(\"FMOD 1\")",
"def _change_mode(self, attr, old, new):\n self.exg_mode = new",
"def patch(self, patch):\n if patch is None:\n raise ValueError(\"Invalid value for `patch`, must not be `None`\") # noqa: E501\n\n self._patch = patch",
"def __try_set_patch_mode(self):\n try:\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING)\n self.current_auto_os_patch_state = self.package_manager.get_current_auto_os_patch_state()\n\n # disable auto OS updates if VM is configured for platform updates only.\n # NOTE: this condition will be false for Assessment operations, since patchMode is not sent in the API request\n if self.current_auto_os_patch_state != Constants.AutomaticOSPatchStates.DISABLED and self.execution_config.patch_mode == Constants.PatchModes.AUTOMATIC_BY_PLATFORM:\n self.package_manager.disable_auto_os_update()\n\n self.current_auto_os_patch_state = self.package_manager.get_current_auto_os_patch_state()\n\n if self.execution_config.patch_mode == Constants.PatchModes.AUTOMATIC_BY_PLATFORM and self.current_auto_os_patch_state == Constants.AutomaticOSPatchStates.UNKNOWN:\n # NOTE: only sending details in error objects for customer visibility on why patch state is unknown, overall configurepatching status will remain successful\n self.configure_patching_exception_error = \"Could not disable one or more automatic OS update services. Please check if they are configured correctly\"\n\n self.composite_logger.log_debug(\"Completed processing patch mode configuration.\")\n except Exception as error:\n self.composite_logger.log_error(\"Error while processing patch mode configuration. [Error={0}]\".format(repr(error)))\n self.configure_patching_exception_error = error\n self.configure_patching_successful &= False",
"def set_mode_prop( self ):\n self.__is_text_file = eolfix_callback_prop\n self.__is_text_file_params = {}",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f\"patch -p1 < {self.project.patch}/{self.ver}/{patch}\")",
"def set_mode(self, nt):\n return _radio_astro_swig.detect_set_mode(self, nt)",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f'patch -p1 < {self.project.patch}/{self.ver}/{patch}')",
"def set_mode_prop(self):\n self.__is_text_file = eolfix_callback_prop\n self.__is_text_file_params = {}",
"def config_mode(self):\n\n pass",
"def setOperationMode(self):\n\n mode = ''\n if self.ui.checkEditNone.isChecked():\n mode = 'normal'\n elif self.ui.checkEditBuildPoints.isChecked():\n mode = 'build'\n elif self.ui.checkEditHorizonMask.isChecked():\n mode = 'horizon'\n elif self.ui.checkPolarAlignment.isChecked():\n mode = 'star'\n\n # styles\n if self.horizonMarker is not None:\n self.horizonMarker.set_marker(self.MODE[mode]['horMarker'])\n self.horizonMarker.set_color(self.MODE[mode]['horColor'])\n if self.pointsBuild is not None:\n self.pointsBuild.set_color(self.MODE[mode]['buildPColor'])\n if self.starsAlign is not None:\n # self.starsAlignAnnotate.set_color(self.MODE[mode]['horMarker'])\n self.starsAlign.set_color(self.MODE[mode]['starColor'])\n\n self.drawCanvas()\n return True",
"def set_mode(self, mode):\n self._write_byte(BNO055_OPR_MODE_ADDR, mode & 0xFF)\n # Delay for 30 milliseconds (datsheet recommends 19ms, but a little more\n # can't hurt and the kernel is going to spend some unknown amount of time\n # too).\n time.sleep(0.03)",
"def setModeAttributes(self) -> None:\n d = self.attributesDict\n aList = (\n ('default', 'null'),\n ('digit_re', ''),\n ('escape', ''), # New in Leo 4.4.2.\n ('highlight_digits', True),\n ('ignore_case', True),\n ('no_word_sep', ''),\n )\n for key, default in aList:\n val = d.get(key, default)\n if val in ('true', 'True'):\n val = True\n if val in ('false', 'False'):\n val = False\n setattr(self, key, val)",
"def set_mode(self, mode = \"CHP\"):\n return self.echo(\":INIT:\" + mode)",
"def set_tracking_mode(self, mode):\n self._send_command_and_validate_response('T' + chr(mode))",
"def getPatch(self) -> int:\n ...",
"def update_mode(self):\n self.__mode = self.__exp_mode | self.__exp_out_mode\n pvc.set_exp_modes(self.__handle, self.__mode)",
"def CmdPkgPatch(package, options):\n package.Patch()",
"def set_operation_mode(self, operation_mode: str) -> None:\n raise NotImplementedError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Private Endpoint Connection properties. | def properties(self) -> Optional['outputs.PrivateEndpointConnectionPropertiesResponse']:
return pulumi.get(self, "properties") | [
"def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionDataModelResponse']:\n return pulumi.get(self, \"private_endpoint_connections\")",
"def private_interconnect_infos(self) -> pulumi.Output[Sequence['outputs.InterconnectAttachmentPrivateInterconnectInfo']]:\n return pulumi.get(self, \"private_interconnect_infos\")",
"def get_properties(self):\n return (\n f\"name: {self._internal_name}, uid: {self.uid}, ports:\"\n f\" {self.ports.keys()}, aliases {self.aliases.keys()}, number of\"\n f\" references: {len(self.references)}\"\n )",
"def private_interconnect_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InterconnectAttachmentPrivateInterconnectInfoArgs']]]]:\n return pulumi.get(self, \"private_interconnect_infos\")",
"def endpoints(self) -> object:\n return self._endpoints",
"def user_endpoint_config(self) -> dict[str, t.Any] | None:\n return self._user_endpoint_config",
"def properties(self) -> Optional['outputs.ManagedNetworkPeeringPolicyPropertiesResponse']:\n return pulumi.get(self, \"properties\")",
"def properties(self) -> 'outputs.LoadBalancerConfigurationPropertiesResponse':\n return pulumi.get(self, \"properties\")",
"def connection_dict(self):\n return {'lib_path': self.lib_path,\n 'instr_id': self.instr_id}",
"def read_connection_params(self):\n with open(\"model/conn_params.json\") as json_file:\n params = json.load(json_file)\n return params[\"host\"], params[\"user\"], params[\"password\"]",
"def instance_endpoint(self) -> \"Endpoint\":\n ...",
"def get_conn_info(self):\n return session.ConnectionInfo(self.request.remote_ip,\n self.request.cookies,\n self.request.arguments,\n self.request.headers,\n self.request.path)",
"def PROPERTIES(self):\n return \"properties\"",
"def testProperties(self):\n # Original endpoint description\n original = beans.EndpointDescription(\n self.svc_ref,\n {pelix.remote.PROP_ENDPOINT_ID: \"toto\",\n pelix.remote.PROP_IMPORTED_CONFIGS: ['titi'],\n pelix.constants.OBJECTCLASS: \"spec\"})\n for key in original.get_properties().keys():\n self.assertFalse(key.startswith(\"service.exported\"),\n \"An export property has been found\")",
"def is_private(self):\n return self.class_name.startswith(u'_') or getattr(self.endpoint_class, 'private', False)",
"def getProperties(self):\n # type: () -> Dict[str]\n pass",
"def get_conn_pmtr_client_state(self):\n connection_parameters = {\n 'host': getenv(\"SERVER_CLIENT\"),\n 'user': getenv(\"USER_NAME_CLIENT\"),\n 'password': getenv(\"PASSWORD_CLIENT\"),\n 'db': getenv(\"DB_CLIENT\"),\n 'charset': 'utf8mb4',\n 'cursorclass': pymysql.cursors.DictCursor\n }\n return connection_parameters",
"def get_endpoints(self):\n\n return self._get_component_metadata()['endpoints']",
"def printConnectionParam(self):\n log_func.info(u'UniReader <%s>. Communication parameters:' % self.getName())\n log_func.info(u'\\tHost <%s>' % self.getHost())\n log_func.info(u'\\tPort <%s>' % self.getPort())\n log_func.info(u'\\tNode <%s>' % self.getNode())\n log_func.info(u'\\tServer <%s>' % self.getServer())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List of group IDs. | def group_ids(self) -> Sequence[str]:
return pulumi.get(self, "group_ids") | [
"def group_ids(self):\n if hasattr(self, '_group_ids'):\n return self._group_ids\n d = self.allele_and_contig_pairs_to_unique_ids()\n return [\n d[(allele, contig)]\n for (allele, contig)\n in zip(self.alleles, self.contigs)\n ]",
"def all_security_group_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"all_security_group_ids\")",
"def get_group_list(self, globs):\n # special cases to speed things up:\n return self._expand_globs(globs,\n list(self.core.metadata.groups.keys()))",
"def groups(self):\n\n return list(self.grpimg.keys())",
"def get_list_group_ids(filename):\n # load dataframe from csv\n df = pd.read_csv(filename, error_bad_lines=False)\n # drop null values\n df = df.dropna(axis=0, how='any')\n # purely get 1st column\n video_ids = df.iloc[:, 1]\n # convert to 1d array\n video_ids = video_ids.ravel()\n\n individual_str = \"\"\n list_groups_of_ids = []\n i = 0\n for video_id in video_ids:\n if (i == 0):\n individual_str += video_id\n i += 1\n else:\n if (i < group_size):\n i += 1\n individual_str += ',' + video_id\n else:\n list_groups_of_ids.append(individual_str)\n individual_str = \"\"\n i = 0\n return list_groups_of_ids",
"def get_groups_list():\n try:\n i=0\n groups_list = API_INSTANCE.groups_user_list(CONTENT_TYPE, ACCEPT)\n group_names = [g.name for g in groups_list] \n group_ids = [g.id for g in groups_list]\n group_dict = dict(zip(group_ids, group_names))\n\n for id in group_dict.keys():\n print(id, '->', group_dict[id])\n return\n\n except ApiException as err:\n print(\"Exception when calling UserGroupsApi->groups_user_list: %s\\n\" % err)",
"def list_groups(self,iSurveyID):\n params = self.__format_params(locals().copy())\n method = \"list_groups\"\n r = self.call_rpc(method,params)\n return r.json()['result']",
"def ls_groups(self, **kwargs):\n status, data = self.run_gerrit_command('ls-groups', **kwargs)\n\n return data.split('\\n') if status == 0 else []",
"def getiddgroupdict(self):\n return iddgroups.commdct2grouplist(self.idd_info)",
"def get_group_repoids(self, group_id):\n repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)\n if not repo_ids:\n return []\n l = []\n for repo_id in repo_ids.split(\"\\n\"):\n if repo_id == '':\n continue\n l.append(repo_id)\n return l",
"def sagroups(self):\n sagroups = []\n # Get all groups\n if self.id:\n groups = self.groups.all()\n if groups:\n # Get fh groups tags\n for group in groups:\n if hasattr(group, 'sagroup'):\n sagroups.append(group.sagroup.tag)\n\n return sagroups",
"def geto365groups(self):\n\n request_string = f\"{self.base_url}/groups?$filter=groupTypes/any(c:c+eq+'Unified')\"\n response = requests.get(request_string, headers=self.header_params_GMC)\n data = response.json()\n groups = []\n for group in data['value']:\n groups.append(group['id'])\n return groups",
"def group_list(request, org_id):\n group_html, _ = view_util.group_list_html(int(org_id))\n\n return HttpResponse(group_html)",
"def group_members(self) -> list[str]:\n return self.player.group_members",
"def groups( self , pattern = None ):\n return EclSum.cNamespace().create_group_list( self , pattern )",
"def groupsGet(self, gids, callback):\n j = Json().put(u\"gids\", gids)\n self.callMethodRetList(u\"groups.get\", j.getJavaScriptObject(), Group.__class__, callback)",
"def group_labels(self):\n # Convert to list so that we can index immediately, as keys()\n # is a generator in Python 3\n return list(self._landmark_groups.keys())",
"def get_ids():",
"def get_groups(self):\n return sorted([k for k, v in self.TOKENIZED.groupindex.items()])",
"def get_group_index_lists(group_ids):\n groups = list(set(group_ids))\n r = []\n for group in groups:\n l = []\n for i in range(len(group_ids)):\n if group_ids[i] == group:\n l.append(i)\n r.append(l)\n return r"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The state of the guest configuration service on the Arc-enabled machine. | def guest_configuration_service(self) -> Optional['outputs.ServiceStatusResponse']:
return pulumi.get(self, "guest_configuration_service") | [
"def state(self):\n ret = self._get_attr(\"state\")\n return GuestUserState(ret)",
"def status(self):\n ret = self._get_attr(\"status\")\n return GuestSessionStatus(ret)",
"def device_state_attributes(self):",
"def state(self):\n if self.device.vacuum_status is not None and self.device.is_available == True:\n return STATE_CODE_TO_STATE[self.device.vacuum_status]",
"def state(self):\n return self._state_env.state",
"def _get_systemd_svc_state(self, svc):\n cmd = 'sudo systemctl show ' + svc\n ret, stdout, stderr = self.cli_cmd_sync(cmd, shell=True)\n\n if ret != 0:\n logger.debug('Error executing systemctl show command, code: %d' % ret)\n return None, None\n\n load_state = None\n active_state = None\n\n lines = [x.strip() for x in stdout.split('\\n')]\n for line in lines:\n parts = line.split('=', 2)\n if len(parts) < 2:\n continue\n\n cmd, val = [x.strip().lower() for x in parts]\n if cmd == 'loadstate':\n load_state = val\n if cmd == 'activestate':\n active_state = val\n return load_state, active_state",
"def state(self):\n if self.device.vacuum_status is not None:\n return STATE_CODE_TO_STATE[self.device.vacuum_status]",
"def running_config(self):\n response = self.show(u'show running-config', raw_text=True)\n return response",
"def get_state(self):\n return self._skuld.cmd(SkuldCmd(name='get_state',\n args=None, block=True))",
"def get_garden_state(self):\n return self.garden.get_garden_state()",
"def virtual_machine_config(self) -> 'outputs.VirtualMachineConfigResponse':\n return pulumi.get(self, \"virtual_machine_config\")",
"def get_device_state(self):\n return self.__send_poll(\"device\")",
"def get_state(self):\n if self.connected is True:\n return self.__request(\n WemoSwitch.body_status, WemoSwitch.headers_get)\n else:\n return WemoSwitch.ERROR_STATE",
"def get_health_state(self):\n return self._power_mgmt.get_health_state()",
"def getACState(self):\r\n return self.aircon.actuators[0].getState()",
"def get_guest_entered_acpi_mode(self):\n entered = self._call(\"getGuestEnteredACPIMode\")\n return entered",
"def _get_ip_cfg_status(self):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM, 2)\n if self.s.is_checked(self.info['loc_cfg_system_ip_manual_radio']):\n return \"static\"\n\n else:\n return \"dhcp\"",
"def state(self):\n\n try:\n out = self.__get_facts()\n except VsanNotPresent:\n return None\n if out:\n return out.get(get_key(vsankeys.VSAN_STATE, self._SW_VER))\n return None",
"def vm_read_config(self):\n\n self.cfg_details['displayName'] = self.vmx_info['displayName']\n self.cfg_details['memsize'] = self.vmx_info['memsize']\n self.cfg_details['virtualHW.version'] = \\\n self.vmx_info['virtualHW']['version']\n\n # ToDo:Check for the following keys needs to be added\n vmtypekey = \"%s__%s\" % (str(self.vmx_info['config']['version']), \n str(self.vmx_info['virtualHW']['version']))\n self.cfg_details['vm_type'] = HVM_VMWARE_NAMEMAP[vmtypekey]\n self.cfg_details['vm_type_str'] = HYPERVISORS[self.cfg_details['vm_type']]\n\n self.__get_primary_disk()\n\n return self.cfg_details",
"def device_state_attributes(self):\r\n if self.vacuum_state is not None:\r\n return {\r\n ATTR_STATUS: STATE_CODE_TO_STATE[self.vacuum_state],\r\n ATTR_WATERBOX_STATUS: WATERBOX_CODE_TO_NAME.get(\r\n self._waterbox_status, \"Unknown\"\r\n ),\r\n ATTR_OPERATION_STATUS: OPERATION_STATUS_CODE_TO_NAME.get(\r\n self._operation_status, \"Unknown\"\r\n ),\r\n ATTR_OPERATING_MODE: OPERATING_MODE_CODE_TO_NAME.get(\r\n self._operating_mode, \"Unknown\"\r\n ),\r\n ATTR_ERROR: ERROR_CODE_TO_ERROR.get(self.vacuum_error, \"Unknown\"),\r\n ATTR_CARPET_BOOST: self._carpet_boost,\r\n ATTR_MULTI_MAP_ENABLED: self._multi_map_enabled,\r\n ATTR_DND_ENABLED: self._dnd_enabled,\r\n ATTR_DND_START_TIME: self._dnd_start_time,\r\n ATTR_DND_STOP_TIME: self._dnd_stop_time,\r\n ATTR_AUDIO_VOLUME: self._audio_volume,\r\n ATTR_AUDIO_LANGUAGE: self._audio_language,\r\n ATTR_TIMEZONE: self._timezone,\r\n ATTR_FAN_SPEED: SPEED_CODE_TO_NAME.get(\r\n self._current_fan_speed, \"Unknown\"\r\n ),\r\n ATTR_MAIN_BRUSH_LEFT_TIME: self._main_brush_time_left,\r\n ATTR_MAIN_BRUSH_LIFE_LEVEL: self._main_brush_life_level,\r\n ATTR_SIDE_BRUSH_LEFT_TIME: self._side_brush_time_left,\r\n ATTR_SIDE_BRUSH_LIFE_LEVEL: self._side_brush_life_level,\r\n ATTR_FILTER_LIFE_LEVEL: self._filter_life_level,\r\n ATTR_FILTER_LEFT_TIME: self._filter_left_time,\r\n ATTR_CLEANING_AREA: self._cleaning_area,\r\n ATTR_CLEANING_TIME: self._cleaning_time,\r\n ATTR_CLEANING_LOG_START: time.strftime(\r\n \"%Y-%m-%d %H:%M:%S\", time.localtime(self._total_log_start)\r\n ),\r\n ATTR_CLEANING_TOTAL_TIME: self._total_clean_time,\r\n ATTR_CLEANING_TOTAL_COUNT: self._total_clean_count,\r\n ATTR_CLEANING_TOTAL_AREA: self._total_clean_area,\r\n ATTR_CLEAN_CLOTH_TIP: self._clean_cloth_tip,\r\n ATTR_SERIAL_NUMBER: self._serial_number,\r\n ATTR_WATER_LEVEL: WATER_CODE_TO_NAME.get(\r\n self._current_water_level, \"Unknown\"\r\n ),\r\n ATTR_WATER_LEVEL_LIST: self.water_level_list,\r\n ATTR_MAP_ID_LIST: dict(\r\n zip(\r\n list(\r\n \"map_\" + str(x)\r\n for x in range(len(self._schedule.split(\";\")))\r\n if len(self._schedule) > 0\r\n ),\r\n list(\r\n int(x.split(\"-\")[5])\r\n for x in self._schedule.split(\";\")\r\n if len(self._schedule) > 0\r\n ),\r\n )\r\n ),\r\n ATTR_ROOM_LIST: dict(\r\n zip(\r\n list(\r\n \"map_\" + str(x)\r\n for x in range(len(self._schedule.split(\";\")))\r\n if len(self._schedule) > 0\r\n ),\r\n list(\r\n [\r\n chr(int(item) + 64)\r\n for item in list(\r\n x.split(\",\")\r\n for x in list(\r\n x.split(\"-\")[8]\r\n for x in self._schedule.split(\";\")\r\n )\r\n )[i]\r\n ]\r\n for i in range(len(self._schedule.split(\";\")))\r\n if len(self._schedule) > 0\r\n ),\r\n )\r\n ),\r\n }"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move the self.y value of the Raindrop down the screen (y increase) at the self.speed. | def move(self):
# TODO 11: Change the y position of this Raindrop by its speed.
pass | [
"def update(self):\n self.y += (self.settings.alien_speed * self.settings.alien_direction)\n self.rect.y = self.y",
"def update_y(self) -> None:\n self.y_vel += self.gravity\n self.position.y += self.y_vel",
"def move(self):\n self.tick_count += 1\n\n # equation for displacement of bird, downward acceleration\n d = self.vel * self.tick_count + 1.5 * self.tick_count ** 2\n\n # setting terminal velocity, so we don't go move too fast\n if d > self.TERMINAL_VEL:\n d = self.TERMINAL_VEL\n\n # before moving upward move a bit more\n if d < 0:\n d -= 2\n\n # change y position based on displacement\n self.y = self.y + d\n\n # make bird point upward if bird above starting point\n if d < 0 or self.y < (self.height + 50):\n if self.tilt < self.MAX_ROTATION:\n self.tilt = self.MAX_ROTATION\n else: # tilt down\n if self.tilt > -90:\n self.tilt -= self.ROT_VEL",
"def moveDown(self):\n if self.vel.stopped():\n self.vel.y += self.speed",
"def robot_up(self):\t\r\n\t self.y = self.y - 1\r\n\t if self.y < 0:\r\n\t\t self.y = 0",
"def robot_down(self):\t\r\n\t self.y = self.y + 1\r\n\t if self.y > 9:\r\n\t\t self.y = 9",
"def moveUp(self):\n if self.vel.stopped():\n self.vel.y -= self.speed",
"def _move_up(self, speed = 1):\n\t\tif self.offset_y + speed < 0:\n\t\t\tself.offset_y += speed",
"def _move_down(self, speed = 1):\n\t\tif self.offset_y - speed > -(self.world_grid_size[1]*self.tile_size[1] - self.screen_size[1]):\n\t\t\tself.offset_y -= speed",
"def move(self):\n\n self.rect.x += self.x_speed\n self.rect.y += self.y_speed\n\n # prevents the ball from going off the screen sideways.\n if self.rect.left <= 0:\n self.x_speed = 1\n if self.rect.right >= APPLICATION_WIDTH:\n self.x_speed = - 1\n\n # prevents the ball from going off the screen above.\n if self.rect.top <= 0:\n self.y_speed = - self.y_speed\n\n # prevents the ball from falling off the screen.\n if self.rect.bottom > APPLICATION_HEIGHT:\n self.y_speed = - 1\n\n # when the ball is above in the air, the speed of the ball increase like how the gravity works.\n if self.rect.y < APPLICATION_HEIGHT - RADIUS_OF_BALL:\n self.y_speed += 0.3",
"def move(self):\n\n self.rect.x += self.x_speed\n self.rect.y += self.y_speed\n\n # makes he ball bounce off the wall\n if self.rect.left <= 0 or self.rect.right >= self.windowWidth:\n self.x_speed = - self.x_speed\n if self.rect.top <= 0:\n self.y_speed = - self.y_speed",
"def update_position(self):\n \t\t\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed",
"def move_y(self, amount=40):\n self.y_coor += amount\n self.pos = [self.x_coor - self.node_size / 2, self.y_coor - self.node_size / 2]",
"def move(self):\n #The goal here is to have a bouncing movement.\n #So the first part of the code checks if the entity has\n #reached any of the screen's edges. If so, it changes to\n #the opposite direction.\n width, height = self.get_size()\n if self.x - width/2 <=0 and self.horizontal_dir == -1:\n self.horizontal_dir = 1\n elif self.x + width/2 >= SCREEN_WIDTH and self.horizontal_dir == 1:\n self.horizontal_dir = -1\n if self.y - height/2 <=0 and self.vertical_dir == -1:\n self.vertical_dir = 1\n elif self.y + height/2 >= SCREEN_HEIGHT and self.vertical_dir == 1:\n self.vertical_dir = -1\n\n #This is the movement part.\n self.x+=self.horizontal_dir*self.speed\n self.y+=self.vertical_dir*self.speed",
"def stopY(self):\r\n self.deltaY = 0",
"def move_y(self, val: int) -> None:\n self.y_pos += val",
"def move_waypoint_y(self, val: int) -> None:\n self.waypoint_y += val",
"def jump(self):\n if self.grounded:\n self.vy -= 30\n elif self.doublejump == 1:\n self.vy -= 30\n self.doublejump = 0",
"def deltaY(self, yDelta):\n self.y += yDelta\n self.rect.y = round(self.y)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the Raindrop y value is not shown on the screen, otherwise false. | def off_screen(self):
# Note: this will be used for testing, but not used in the final version of the code for the sake of simplicity.
# TODO 13: Return True if the y position of this Raindrop is greater than 800.
pass | [
"def y_axis_visibility(self) -> bool:\n return bool(self.GetYAxisVisibility())",
"def loses(self):\n return not (-2.5 < self.x < 2.5 and -0.262 < self.phi < 0.262)",
"def y_label_visibility(self) -> bool:\n return self._y_label_visibility",
"def at_top(self) -> bool:\n return self.ycor() >= self.max_top",
"def is_rainy():\n if not no_rain.is_active:\n return True\n else:\n return False",
"def isVisible(self) -> bool:\n return self._getBackend().isYRightAxisVisible()",
"def _get_isClosedInU(self) -> \"bool\" :\n return _core.SurfaceEvaluator__get_isClosedInU(self)",
"def outside_window(self):\n if self.ball.y >= self.window.height:\n return True",
"def is_false(self)->bool:\n return self.value == KgtkFormat.FALSE_SYMBOL",
"def DisplayMinY(self) -> float:",
"def _zero_in_bounds(self):\n vmin, vmax = self._axes.yaxis._scale.limit_range_for_scale(0, 1, 1e-5)\n return vmin == 0",
"def _validate_y(self, y):\n return y",
"def isInverted(self):\n return self._getBackend().isYAxisInverted()",
"def y_axis_minor_tick_visibility(self) -> bool:\n return bool(self.GetYAxisMinorTickVisibility())",
"def is_valid(self):\n return np.all(np.isfinite(self.delta_x)) and np.all(np.isfinite(self.delta_y))",
"def has_value(self) -> bool:\n return self.value != 0.0",
"def PlotWindowMinY(self) -> float:",
"def check_novelty(self):\r\n return self.novelty",
"def northern(self):\n return (self.latitude >= 0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if the given raindrop is hitting this Hero, otherwise false. | def hit_by(self, raindrop):
# TODO 19: Return True if this Hero is currently colliding with the given Raindrop.
pass | [
"def inside_target(self, shot_coordinates):\n\n #checking distance\n dist = cv2.pointPolygonTest(self.t_points,(shot_coordinates[0], shot_coordinates[1]),True)\n\n #if distance is 0 or more, shot is inside the target\n if dist >= 0:\n self.update_target(shot_coordinates)\n return True\n else:\n return False",
"def can_shoot(self):\n\n return (self._cooldown <= 0)",
"def is_hitted(self, coord):\n return coord in self.__hit",
"def should_hit(self):\n \n return self.hand.compute_bj_count() < 17",
"def hit_a_wall(self) -> bool:\n\n snake = self.snake\n if snake.moving_direction == \"left\" and snake.head.x <= self.boundaries[\"left\"]:\n return True\n if snake.moving_direction == \"right\" and snake.head.x >= self.boundaries[\"right\"]:\n return True\n if snake.moving_direction == \"up\" and snake.head.y <= self.boundaries[\"up\"]:\n return True\n if snake.moving_direction == \"down\" and snake.head.y >= self.boundaries[\"down\"]:\n return True\n\n return False",
"def has_won(self, snake_index):\n snake = self.snakes[snake_index]\n\n # if there is only 1 agent, return true if it covers the entire grid.\n if MultiplayerSnakeEnv.num_agents == 1:\n return len(snake.body) == SnakeEnv.M * SnakeEnv.N\n\n # if there are multiple agents, return true if it is the last agent to die.\n if not snake.done:\n return False\n\n # return true if this snake is the last to die.\n return snake.died_number == MultiplayerSnakeEnv.num_agents",
"def other_can_shoot(self):\n\n if self._other == None:\n return None\n\n return self.other_cooldown <= 0",
"def is_within_ramp(ball, ramp):\n\tif ramp._height > 0:\n\t\tif (ball._x+ball._radius) > min(ramp._x, ramp._x+ramp._width) and (ball._x-ball._radius) < max(ramp._x, ramp._x+ramp._width) and (ball._y - ball._radius) < ramp._y and (ball._y + ball._radius) > (ramp._y - ramp._height):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\telif ramp._height < 0:\n\t\tif (ball._x+ball._radius) > min(ramp._x, ramp._x+ramp._width) and (ball._x-ball._radius) < max(ramp._x, ramp._x+ramp._width) and (ball._y + ball._radius) > ramp._y and (ball._y - ball._radius) < (ramp._y-ramp._height):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def on_target(agent):\n\n if agent.ship.position == agent.target.position:\n\n return True\n\n return False",
"def is_matching_shot_event(specific_event, play):\n # retrieving base event\n event = Event.find_by_id(specific_event.event_id)\n # if it's a goal we have to retrieve the accompanying shot separately\n if event.type == 'GOAL':\n # it could be a shot in a shootout, too\n if play['period_type'] == 'SHOOTOUT':\n specific_event = ShootoutAttempt.find_by_event_id(event.event_id)\n else:\n specific_event = Shot.find_by_event_id(event.event_id)\n\n play_distance = calculate_distance_from_goal(play)\n\n # trying to match shooter, shot type *and* shot distance first\n try:\n if (\n play['active'], play['shot_type'], play_distance\n ) == (\n specific_event.player_id,\n specific_event.shot_type.lower(),\n specific_event.distance\n ):\n return True\n except Exception as e:\n # TODO: propper logging\n pass\n\n # trying to match only shooter and shot type\n try:\n if (\n play['active'], play['shot_type'],\n ) == (\n specific_event.player_id,\n specific_event.shot_type.lower(),\n ):\n return True\n except Exception as e:\n # TODO: propper logging\n pass\n\n # trying to match shooter and distance\n try:\n if (\n play['active'], play_distance,\n ) == (\n specific_event.player_id,\n specific_event.distance,\n ):\n return True\n except Exception as e:\n # TODO: propper logging\n pass\n\n # finally trying to match only shooter\n try:\n if (\n play['active'],\n ) == (\n specific_event.player_id,\n ):\n return True\n except Exception as e:\n # TODO: propper logging\n pass\n\n return False",
"def check_shot(self,row,column):\n if self.board[row][column] == \"H\" or self.board[row][column] == \"M\":\n return \"You've already fired there\"\n\n elif self.board[row][column] == \"S\":\n self.place_hit(row,column)\n return \"Hit!\"\n \n elif self.board[row][column] == \"-\":\n self.place_miss(row,column)\n return \"Miss!\"",
"def check_hitler_chanc_win(self) -> bool:\n Log.log_elected_chancellor(self.chancellor, self.board.fascist_board)\n if 4 <= self.board.fascist_board and self.chancellor.role is Role.HITLER:\n self.winner = BoardStates.HITLER_CHANCELLOR\n return True\n return False",
"def check_hero(self, hero_name):\n\n try:\n cursor = self.conn.cursor()\n cursor.execute(\"SELECT * FROM heroes WHERE hero_name like ?\", (hero_name,))\n return cursor.fetchone() is not None\n except Error as e:\n print(\"Error in check_hero:\", e)\n return False",
"def check_if_next_tile_is_hit(self):\n board = self._board_object.get_board()\n if self._direction == 'down':\n if board[self._row + 1][self._column] == 'a' or board[self._row + 1][self._column] == 'h':\n return True\n if self._direction == 'up':\n if board[self._row - 1][self._column] == 'a' or board[self._row - 1][self._column] == 'h':\n return True\n if self._direction == 'right':\n if board[self._row][self._column + 1] == 'a' or board[self._row][self._column + 1] == 'h':\n return True\n if self._direction == 'left':\n if board[self._row][self._column - 1] == 'a' or board[self._row][self._column - 1] == 'h':\n return True\n return False",
"def is_sight(self,x_ego,x_obs):\n return self.norm(x_ego-x_obs)<=self.detection_radius",
"def _check_if_within(self, obstacle):\n # type: (obstacleMsg) -> bool\n uav_pos = np.array(\n [self.uav_pose.pose.position.x, self.uav_pose.pose.position.y, self.uav_pose.pose.position.z])\n obs_pos = np.array(obstacle.pose[:3])\n return np.linalg.norm((uav_pos, obs_pos)) <= self.radius",
"def is_win(self):\n if self._is_terminal:\n return self.board[self.player_goal_idx] > self.board[self.opponent_goal_idx]",
"def game_over(self):\n return self.winner() is not None",
"def check_invincibility(self):\n if not self.hittable and self.time_hit + 1200 <= pygame.time.get_ticks():\n self.hittable = True\n else:\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
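The `hit_by` stub above only states the intended behavior. A minimal sketch of one way to implement it, assuming (hypothetically, since the assignment's framework is not shown here) that both the Hero and the Raindrop keep a pygame-style `rect` attribute:

```python
# Hedged sketch, not the dataset's reference solution.
# Assumes self.rect and raindrop.rect are pygame.Rect objects; if the
# framework stores plain x/y/width/height instead, an equivalent
# manual overlap test would be needed.
def hit_by(self, raindrop):
    # pygame.Rect.colliderect returns True when the two rectangles overlap.
    return self.rect.colliderect(raindrop.rect)
```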
Creates a Cloud sprite that will produce Raindrop objects. The cloud will be moving around. | def __init__(self, screen, x, y, image_filename):
# TODO 24: Initialize this Cloud, as follows:
# TODO - Store the screen.
# TODO - Set the initial position of this Cloud to x and y.
# TODO - Set the image of this Cloud to the given image filename.
# TODO - Create a list for Raindrop objects as an empty list called raindrops.
# TODO Use instance variables:
# TODO screen x y image raindrops.
pass | [
"def rain(self):\n # TODO 28: Append a new Raindrop to this Cloud's list of Raindrops,\n # TODO where the new Raindrop starts at:\n # TODO - x is a random integer between this Cloud's x and this Cloud's x + 300.\n # TODO - y is this Cloud's y + 100.\n pass",
"def create_sprite(self):\n rgb = (84, 170, 232)\n height = 15\n length = 15\n self.sprite = BaseStationSprite(rgb)",
"def draw_cloud(width=140, height=60, color=rgb(255, 255, 255)):\n\n cairo_color = color / rgb(255, 255, 255)\n\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)\n ctx = cairo.Context(surface)\n\n # A cloud consists of 4 circles\n draw_circle(ctx, width / 3, height / 2, height / 3, cairo_color)\n draw_circle(ctx, 2 * width / 3, height / 2, height / 3, cairo_color)\n draw_circle(ctx, width / 2, height / 3, height / 3, cairo_color)\n draw_circle(ctx, width / 2, 2 * height / 3, height / 3, cairo_color)\n\n surface.write_to_png('cloud.png')",
"def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,200)\r\n\r\n cloud = pyglet.image.load('images/cloud.png')\r\n self.cloudSprite = pyglet.sprite.Sprite(cloud)\r\n self.cloudSprite.y = 100\r\n\r\n lightening = pyglet.image.load('images/lightening.png')\r\n self.lSprite = pyglet.sprite.Sprite(lightening)\r\n self.lSprite.y = 200\r\n\r\n car = pyglet.image.load('images/car.png')\r\n self.carSprite = pyglet.sprite.Sprite(car, -500, 0)\r\n\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.cloudSprite)\r\n self.drawableObjects.append(self.lSprite)\r\n self.drawableObjects.append(self.dropletSprite)\r\n self.drawableObjects.append(self.carSprite)",
"def testSprite(self):\n\t\tplayer = Civilian(50, 170, \"blue\")\n\t\tself.sprites.add(player)",
"def create_sprite(location, image_name, animal_name):\n image = pygame.image.load(image_name)\n image_rect = Rect(location, (24, 24))\n new_sprite = Sprite(image, image_rect, animal_name)\n return new_sprite",
"def create_rain(settings, screen, raindrops, x):\n raindrop = Raindrop(settings, screen)\n raindrop.rect.x = x\n raindrops.add(raindrop)\n print(len(raindrops))",
"def create_sprite(self,x=-5000,y=-5000,spritetype=0):\n\t\trand = random.randint(0,len(self.all_sprites[spritetype])-1)\n\t\ttsprite = cAnimSprite(self.all_sprites[spritetype][rand],self.sprite_fps[spritetype])\n\t\ttsprite.move(x,y) #out of view\n\t\ttsprite.draw = True\n\t\treturn tsprite",
"def create_rocket():\n rocket = GCompound()\n core = create_filled_rect(0, 0, ROCKET_WIDTH, ROCKET_HEIGHT, \"oldlace\")\n rocket.add(core, -ROCKET_WIDTH / 2, -ROCKET_HEIGHT)\n\n # Add your code below to add more pieces to the rocket!\n\n\n\n return rocket",
"def __init__(self, position: list):\r\n self.offset = 128\r\n super().__init__([position[0] - self.offset/2, position[1] - self.offset/2])\r\n self.image = pg.image.load(\"images/goliath.png\")\r\n self.mask = pg.mask.from_surface(self.image)\r\n self.acceleration = .06\r\n self.drag = .008\r\n self.mass = .25\r\n self.max_speed = 6.5",
"def manageSnow(self):\n max_snow = 100\n new_snow = random.randint(0, self.MAX_Y)\n if len(self.snowItems) < max_snow:\n if new_snow < self.MAX_Y // max_snow:\n snow = Item(random.randint(0, self.MAX_X), 0, \"snowflake.png\")\n snow.set_vy(1)\n self.snowItems.append(snow)\n\n for i in self.snowItems:\n i.set_vx(self.wind * 25)\n if i.get_y() >= self.MAX_Y:\n i.set_y(0)\n if i.get_x() > self.MAX_X:\n i.set_x(1)\n elif i.get_x() < 1:\n i.set_x(self.MAX_X)",
"def __init__(self, player: Player):\n self._sprite = [\n \"&&&&.----.__ &\",\n \"&&&/---.__ \\\\&\",\n \"&&/ `\\\\ |\",\n \"&| o o \\\\|\",\n \"/| .vvvvv. |\\\\\",\n \"/| | | |\\\\\",\n \"&| `^vvvv' |&\",\n \"&\\\\__________|&\"\n ]\n self._position = (int(container.FRAME_ROWS / 2 - 3),\n container.FRAME_COLS)\n self._player = player\n self._bullets = []\n self.lives = container.BOSS_LIVES",
"def cloud_generator_small(img, data_path, seed=None, octaves=10, overwrite=False, alt=None):\n\n stack_path = data_path / 'images' / img / 'stack' / 'stack.tif'\n file_name = img + '_clouds.npy'\n cloud_dir = data_path / 'clouds' / 'small'\n cloud_file = cloud_dir / file_name\n\n if overwrite:\n try:\n cloud_file.unlink()\n print('Removing existing cloud image for ' + img + ' and creating new one')\n except FileNotFoundError:\n print('No existing cloud image for ' + img + '. Creating new one')\n if not overwrite:\n if cloud_file.exists():\n print('Cloud image already exists for ' + img)\n return\n else:\n print('No cloud image for ' + img + ', creating one')\n\n # Make directory for clouds if none exists\n if cloud_dir.is_dir() == False:\n cloud_dir.mkdir()\n print('Creating cloud imagery directory')\n\n if seed is None:\n seed = (random.randint(1, 10000))\n\n # Get shape of input image to be masked\n with rasterio.open(stack_path) as ds:\n shape = ds.shape\n\n # Create empty array of zeros to generate clouds on\n clouds = np.zeros(shape)\n freq = np.ceil(sqrt(np.sum(shape))) * 2 # Frequency calculated based on shape of image\n\n # Generate 2D (technically 3D, but uses a scalar for z) simplex noise\n for y in range(shape[1]):\n for x in range(shape[0]):\n clouds[x, y] = snoise3(x / freq, y / freq, seed, octaves)\n\n # Save cloud file as\n np.save(cloud_file, clouds)\n\n # Return clouds\n return clouds",
"def createDrawableObjects(self):\r\n num_rows = 4\r\n num_columns = 1\r\n droplet = 'images/droplet.png'\r\n animation = self.setup_animation(droplet,\r\n num_rows,\r\n num_columns)\r\n\r\n self.dropletSprite = pyglet.sprite.Sprite(animation)\r\n self.dropletSprite.position = (0,0)\r\n\r\n # Add these sprites to the list of drawables\r\n self.drawableObjects.append(self.dropletSprite)",
"def __init__(self, center):\n pygame.sprite.Sprite.__init__(self)\n self.image = loadImage(\"pointer.png\", True)\n self.rect = self.image.get_rect()\n self.rect.center = center",
"def __init__(self,x,y):\n food_image = pyglet.resource.image(\"food-boi.png\")\n \n food_image.anchor_x = food_image.width/2\n food_image.anchor_y = food_image.height/2\n\n super(Food, self).__init__(food_image)\n\n self.x = x\n self.y = y\n self.scale = 0.1",
"def __init__(self, player):\n self.platform_list = pygame.sprite.Group()\n self.enemy_list = pygame.sprite.Group()\n self.player = player\n\n # Background image\n self.background = None\n\n self.world_shift = 0\n self.level_limit = -1000",
"def spawn_stars(self):\n for i in range(STAR_COUNT):\n self.background_objects.append(Snow((random.randint(0, GAME_ARENA_DIMENSIONS[0]),\n random.randint(0, GAME_ARENA_DIMENSIONS[1]))))",
"def add_cloud(self, name):\r\n\r\n new_cloud = Cloud(name=name)\r\n\r\n self.session.add(new_cloud)\r\n self.session.commit()\r\n\r\n return new_cloud.id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
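The TODO 24 checklist above already names every instance variable the constructor should set. A minimal sketch that follows it literally, assuming the `image` attribute simply stores the given filename (whether the framework expects a filename or a loaded surface is not shown, so that part is an assumption):

```python
# Hedged sketch of TODO 24; only the attributes named in the checklist
# are set, and image handling is assumed to be the raw filename.
def __init__(self, screen, x, y, image_filename):
    self.screen = screen          # surface the Cloud is drawn on
    self.x = x                    # initial horizontal position
    self.y = y                    # initial vertical position
    self.image = image_filename   # image used when drawing this Cloud
    self.raindrops = []           # Raindrop objects produced later by rain()
```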
Adds a Raindrop to the array of raindrops so that it looks like the Cloud is raining. | def rain(self):
# TODO 28: Append a new Raindrop to this Cloud's list of Raindrops,
# TODO where the new Raindrop starts at:
# TODO - x is a random integer between this Cloud's x and this Cloud's x + 300.
# TODO - y is this Cloud's y + 100.
pass | [
"def create_rain(settings, screen, raindrops, x):\n raindrop = Raindrop(settings, screen)\n raindrop.rect.x = x\n raindrops.add(raindrop)\n print(len(raindrops))",
"def drop_rain(settings, screen, raindrops):\n if len(raindrops) >= 20:\n print(\"were in here\")\n raindrops.update()",
"def remove_rain(settings, screen, raindrops):\n for raindrop in raindrops:\n if raindrop.rect.y >= settings.screen_height:\n raindrops.remove(raindrop)",
"def add_rocket(self, rocket):\r\n\t\tself.rockets.append(rocket)",
"def add_radar(self, radar):\n\n # Check if radar location is already in the radar list\n for rad in self.radar_list:\n if (rad.location_is_equal(radar.latitude['data'][0],\n radar.longitude['data'][0],\n radar.altitude['data'][0])):\n # warn(\"WARNING: Tried to add the same radar twice to the\"\n # \" radar list\")\n return rad\n\n rad = _Radar_Trajectory(radar.latitude['data'][0],\n radar.longitude['data'][0],\n radar.altitude['data'][0])\n self.radar_list.append(rad)\n\n # Convert trajectory WGS84 points to polara radar coordinates\n rad.convert_radpos_to_swissgrid()\n\n self._convert_traj_to_swissgrid()\n\n # Note: Earth curvature not considered yet!\n\n (rvec, azvec, elvec) = pyart.core.cartesian_to_antenna(\n self.swiss_chy - rad.ch_y, self.swiss_chx - rad.ch_x,\n self.swiss_chh - rad.ch_alt)\n\n rad.assign_trajectory(elvec, azvec, rvec)\n\n return rad",
"def rain_approximation(\n pr: xr.DataArray,\n tas: xr.DataArray,\n thresh: str = \"0 degC\",\n method: str = \"binary\",\n) -> xr.DataArray:\n prra = pr - snowfall_approximation(pr, tas, thresh=thresh, method=method)\n prra.attrs[\"units\"] = pr.attrs[\"units\"]\n return prra",
"def add_beacon(self):\r\n print('add_beacon()')\r\n beacon = [[1,1,0,0],\r\n [1,1,0,0],\r\n [0,0,1,1],\r\n [0,0,1,1]]\r\n self.universe[1:5, 1:5] = beacon",
"def add_grain(self, ri, l):\r\n self.engine.add_grain(self.engine.ri, ri, l)",
"def add_R_list(self,R_list):\n for tr,R in zip(self.instances,R_list):\n tr.stats.R = R",
"def add_to_reserve(self, pawn):\n self._reserve.append(pawn)",
"def make_it_rain(self, strength=None):\n for _ in range(self.num_droplets):\n random_position = np.array([random.random() * self.world.lx, random.random() * self.world.ly])\n if strength:\n random_strength = strength[0] + (random.random() * strength[1])\n self.cloud.append(WaterDroplet(random_position, water=random_strength))\n else:\n self.cloud.append(WaterDroplet(random_position))\n return self.cloud",
"def addStones(self, stones):\n self.rack = np.append(self.rack, stones, axis=0)",
"def hit_by(self, raindrop):\n # TODO 19: Return True if this Hero is currently colliding with the given Raindrop.\n pass",
"def add_turbine(self, turbine):\r\n\r\n self.turbines.append(turbine)",
"def set_rois(self, ROIlist):\n self.stats['ROIs'] = ROIlist\n self.create_rois()",
"def add_water(self):",
"def add_gripper(self, gripper, arm_name=None):\n if arm_name is None:\n arm_name = self.eef_name\n if arm_name in self.grippers:\n raise ValueError(\"Attempts to add multiple grippers to one body\")\n arm_subtree = self.worldbody.find(\".//body[@name='{}']\".format(arm_name))\n for actuator in gripper.actuator:\n if actuator.get(\"name\") is None:\n raise XMLError(\"Actuator has no name\")\n if not actuator.get(\"name\").startswith(\"gripper\"):\n raise XMLError(\"Actuator name {} does not have prefix 'gripper'\".format(actuator.get(\"name\")))\n for body in gripper.worldbody:\n arm_subtree.append(body)\n self.merge(gripper, merge_body=False)\n self.grippers[arm_name] = gripper\n # Update cameras in this model\n self.cameras = self.get_element_names(self.worldbody, \"camera\")",
"def add_brain(self, brain):\n brain.environment = self\n self.brains[brain.uuid] = brain",
"def _add_roi(self):\n with self.debug_output:\n cat = self._roi_cat_sel.value\n r_dict = self._copy_normalized_rois()\n r_dict[cat].append(None)\n self.rois = self._unnormalize_rois(r_dict)\n self._roi_multi_sel.index = len(self._roi_multi_sel.options) - 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
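TODO 28 above pins down exactly where each new Raindrop should start. A minimal sketch under that description, assuming a `Raindrop(x, y)` constructor exists in the same assignment (its real signature is not shown here):

```python
import random

# Hedged sketch of TODO 28; Raindrop(x, y) is an assumed constructor.
def rain(self):
    x = random.randint(self.x, self.x + 300)  # somewhere along the cloud
    y = self.y + 100                          # just below the cloud image
    self.raindrops.append(Raindrop(x, y))
```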
Runs a semantic fusion unit on the input vector and the fusion vectors to produce an output. | def semantic_fusion(input_vector, input_dim, fusion_vectors, scope):
with tf.variable_scope(scope):
assert len(fusion_vectors) > 0
stacked_vectors = tf.concat(fusion_vectors + [input_vector], axis=-1) # size = [batch_size, ..., input_dim * (len(fusion_vectors) + 1)]
num_total_vectors = len(fusion_vectors) + 1
Wr = tf.get_variable("Wr", dtype=tf.float32, shape=[num_total_vectors * input_dim, input_dim])
Wg = tf.get_variable("Wg", dtype=tf.float32, shape=[num_total_vectors * input_dim, input_dim])
br = tf.get_variable("br", dtype=tf.float32, shape=[input_dim])
bg = tf.get_variable("bg", dtype=tf.float32, shape=[input_dim])
r = tf.tanh(multiply_tensors(stacked_vectors, Wr) + br) # size = [batch_size, ..., input_dim]
g = tf.sigmoid(multiply_tensors(stacked_vectors, Wg) + bg) # size = [batch_size, ..., input_dim]
return g * r + (1 - g) * input_vector # size = [batch_size, ..., input_dim] | [
"def infer_vector(self, text):\n raise NotImplementedError",
"def _conv2d_compute_fusion_para(inputs):\r\n input_memory_type = inputs.op.attrs[\"addr_type\"].value \\\r\n if \"addr_type\" in inputs.op.attrs else 0\r\n valid_shape = inputs.op.attrs[\"valid_shape\"] \\\r\n if \"valid_shape\" in inputs.op.attrs else ()\r\n slice_offset = inputs.op.attrs[\"slice_offset\"] \\\r\n if \"slice_offset\" in inputs.op.attrs else ()\r\n l1_fusion_type = inputs.op.attrs[\"L1_fusion_type\"].value \\\r\n if \"L1_fusion_type\" in inputs.op.attrs else -1\r\n\r\n fmap_l1_addr_flag = inputs.op.attrs[\"L1_addr_flag\"].value \\\r\n if \"L1_addr_flag\" in inputs.op.attrs else \"nothing\"\r\n fmap_l1_valid_size = inputs.op.attrs[\"L1_valid_size\"].value \\\r\n if \"L1_valid_size\" in inputs.op.attrs else -1\r\n\r\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\r\n if not l1_fusion_enable_flag:\r\n l1_fusion_type = -1\r\n\r\n valid_shape = _shape_to_list(valid_shape)\r\n slice_offset = _shape_to_list(slice_offset)\r\n\r\n l2_fusion_enable_flag = get_L1_info(\"L2_fusion_enabled\")\r\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\r\n\r\n if (not l2_fusion_enable_flag) and (not l1_fusion_enable_flag):\r\n input_memory_type = 0\r\n valid_shape = []\r\n slice_offset = []\r\n l1_fusion_type = -1\r\n\r\n if input_memory_type not in (0, 1, 2):\r\n err_man.raise_err_input_mem_type(\"conv2d\", input_memory_type)\r\n if valid_shape and not slice_offset:\r\n err_man.raise_err_specific_user(\"conv2d\", \"if valid_shape exists \"\\\r\n + \"slice_offset can not be []\")\r\n\r\n fusion_para = {\"input_memory_type\": input_memory_type,\r\n \"output_memory_type\": \"fuse_flag\",\r\n \"valid_shape\": valid_shape, \"slice_offset\": slice_offset,\r\n \"l1_fusion_type\": l1_fusion_type, \\\r\n \"fmap_l1_addr_flag\": fmap_l1_addr_flag, \\\r\n \"fmap_l1_valid_size\": fmap_l1_valid_size}\r\n\r\n return fusion_para",
"def _execute_vector(self):\r\n \r\n self._result = self.current_vector.execute(self.formatted_args)",
"def build_fvectors(bag, data):\n\tX, y = [], []\n\tfor sample in data:\n\n\t\tsample_vector = []\n\n\t\tfor word in bag:\n\n\t\t\tsample_vector += [1] if word in sample[0] else [0] \n\n\t\tX.append(sample_vector)\n\t\ty.append(sample[1])\n\n\treturn X, y",
"def UnitVectorCalculator(atom1, atom2, molecule):\n vector1 = molecule[1][atom1]\n vector2 = molecule[1][atom2]\n lenght = distanceMatrix[atom1, atom2]\n return (vector2 - vector1)/lenght",
"def transformToFloatVector(*args):\n return _almathswig.transformToFloatVector(*args)",
"def _transform(self, vector, word):\n for w in word:\n vector = np.dot(vector, self._reflections[w])\n return vector",
"def calculate_ast_vector(d, use_weights=False):\n global ast_feature_list\n global ast_feature_weights\n weights = None if not use_weights else ast_feature_weights\n return calculate_vector(d, feature_names=ast_feature_list, feature_weights=weights)",
"def vectorize(seg_dir, delimiter=\"\\t\", split=\"train\", min_df=1,\n voc=None, verbose=True, store_dir=None):\n bunch = RawDataset()\n # fetch the negative samples.\n with open(os.path.join(seg_dir, split + \"_0.txt\"), \"r\", encoding=\"utf-8\") as f:\n line = f.readline()\n while line:\n text = line.split(delimiter)\n # text [data_idx, raw, word_segment]\n bunch.append(text[1], text[2], 0, int(text[0]))\n line = f.readline()\n # fetch the positive samples.\n with open(os.path.join(seg_dir, split + \"_1.txt\"), \"r\", encoding=\"utf-8\") as f:\n line = f.readline()\n while line:\n text = line.split(delimiter)\n # text [data_idx, raw, word_segment]\n bunch.append(text[1], text[2], 1, int(text[0]))\n line = f.readline()\n bunch.shuffle()\n\n print(\"Vectorization begin...\")\n if split == \"train\":\n tfidf = TfidfVectorizer(sublinear_tf=True, min_df=min_df)\n bunch.tdm = tfidf.fit_transform(bunch.contents)\n bunch.voc = tfidf.vocabulary_\n else:\n tfidf = TfidfVectorizer(sublinear_tf=True, vocabulary=voc)\n bunch.tdm = tfidf.fit_transform(bunch.contents)\n bunch.voc = voc\n print(\"Done.\")\n\n if verbose:\n print(\"\\nTDM Matrix shape: {}.\".format(bunch.tdm.shape))\n print(\"The 10 highest frequent words:\")\n pprint(sorted(bunch.voc.items(), key=lambda kv: (\n kv[1], kv[0]), reverse=True)[:10])\n\n if store_dir is not None:\n print(\"\\nStoring bunch object...\")\n with open(os.path.join(store_dir, split + \".bunch\"), \"wb\") as f:\n pickle.dump(bunch, f)\n print(\"Done.\")\n return bunch",
"def unitvec(atom1, atom2):\n #Final - initial; Atom 1 is the final\n vec = (atom1-atom2) / ((((atom1-atom2)**2).sum(axis = 0)**0.5))\n return vec",
"def _conv2d_fusion_para(inputs, outputs):\r\n input_memory_type = inputs.get(\"addr_type\") \\\r\n if \"addr_type\" in inputs else 0\r\n output_memory_type = outputs.get(\"addr_type\") \\\r\n if \"addr_type\" in outputs else 0\r\n valid_shape = inputs.get(\"valid_shape\") \\\r\n if \"valid_shape\" in inputs else ()\r\n slice_offset = inputs.get(\"slice_offset\") \\\r\n if \"slice_offset\" in inputs else ()\r\n l1_fusion_type = inputs.get(\"L1_fusion_type\") \\\r\n if \"L1_fusion_type\" in inputs else -1\r\n\r\n fmap_l1_addr_flag = inputs.get(\"L1_addr_flag\", \"nothing\")\r\n fmap_l1_valid_size = inputs.get(\"L1_valid_size\", -1)\r\n\r\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\r\n if not l1_fusion_enable_flag:\r\n l1_fusion_type = -1\r\n\r\n valid_shape = _shape_to_list(valid_shape)\r\n slice_offset = _shape_to_list(slice_offset)\r\n\r\n l2_fusion_enable_flag = get_L1_info(\"L2_fusion_enabled\")\r\n\r\n if not l2_fusion_enable_flag and (not l1_fusion_enable_flag):\r\n input_memory_type = 0\r\n output_memory_type = 0\r\n valid_shape = []\r\n slice_offset = []\r\n l1_fusion_type = -1\r\n\r\n if input_memory_type not in (0, 1, 2):\r\n err_man.raise_err_input_mem_type(\"conv2d\", input_memory_type)\r\n if output_memory_type not in (0, 1, 2):\r\n err_man.raise_err_output_mem_type(\"conv2d\", output_memory_type)\r\n if valid_shape and not slice_offset:\r\n err_man.raise_err_specific_user(\"conv2d\", \"if valid_shape exists \"\\\r\n + \"slice_offset can not be []\")\r\n\r\n fusion_para = {\"input_memory_type\": input_memory_type,\r\n \"output_memory_type\": output_memory_type,\r\n \"valid_shape\": valid_shape, \"slice_offset\": slice_offset, \\\r\n \"l1_fusion_type\": l1_fusion_type, \\\r\n \"fmap_l1_addr_flag\": fmap_l1_addr_flag, \\\r\n \"fmap_l1_valid_size\": fmap_l1_valid_size}\r\n\r\n return fusion_para",
"def fusion(self):\n fusion_params = copy(self.base_params)\n fusion_params.update(\n {\n \"app\": \"ConvertToBinary: Striping\",\n \"run_conversion\": False,\n \"dst_level\": None,\n \"dst_image_type\": \"fused\",\n }\n )\n self.run_halted_queue(\n fusion_params,\n [\n {\n \"first\": self.frame_chunks[0][\"first\"],\n \"last\": self.frame_chunks[-1][\"last\"],\n }\n ],\n )",
"def main():\n\t\n\t# create an argument parser\n\tparser = argparse.ArgumentParser(description=\"Add new tokens, update counts, and add new token vectors from a new document inventory\")\n\t\n\t# add arguments\n\tparser.add_argument(\"--tokens_path\", help=\"The path to token directory.\", default=\"data/tokens\")\n\tparser.add_argument(\"--documents_path\", help=\"The path to document directory.\", default=\"data/documents\")\n\tparser.add_argument(\"--documents_version\", help=\"The version of the documents database to load.\", type=int)\n\tparser.add_argument(\"--min_count\", help=\"The minimum number of times a token must appear to be added to the new database.\", type=int, default=5)\n\t\n\t# parse the arguments\n\targs = parser.parse_args()\n\t\n\t# resolve the documents version\n\tdocuments_version = args.documents_version\n\tif not documents_version:\n\t\tdocuments_version = DocumentDatabase.get_latest_version(args.documents_path)\n\t\n\t# load document database\n\tdocument_database = DocumentDatabase.load(args.documents_path, documents_version)\n\t\n\t# print setup information\n\tprint \"\"\n\tprint \"OBER - TOKEN FREQUENCY AND VECTOR GENERATION SCRIPT\"\n\tprint \"\"\n\tprint \"\"\n\tprint \"OLD TOKENS:\\t\\t%s\" % args.tokens_path\n\tprint \"DOCUMENTS:\\t\\t%s [VERSION: %d]\" % (args.documents_path, documents_version)\n\tprint \"MINIMUM COUNT ALLOWED:\\t\\t%d\" % args.min_count\n\tprint \"\"\n\tprint \"\"\n\t\n\t# count vocab\n\tprint(\"COUNTING VOCAB ...\")\n\tcounts = count_vocab(document_database)\n\t\n\t# sort vocab and remove words of less than the min count\n\tprint(\"SORTING VOCAB ...\")\n\t# filter by count\n\tcounts = { token: counts[token] for token in counts if counts[token] >= args.min_count }\n\t# sort by count ascending\n\tcounts = sorted(counts.items(), key=operator.itemgetter(1))\n\t# reverse to get descending\n\tcounts.reverse()\n\t\n\t# load old token database and vectors\n\tprint(\"LOADING OLD TOKEN DATABASE ...\")\n\told_token_database = TokenDatabase.load(db_path=args.tokens_path)\n\t# save the old vectors\n\told_vectors = old_token_database.get_vectors()\n\t\n\t# create new TokenDatabase with same vector size and increment the version\n\tprint(\"CREATING NEW TOKEN DATABASE ...\")\n\tnew_token_database = TokenDatabase(vector_size=old_token_database.vector_size, version=old_token_database.version + 1)\n\t\n\t# add all vocabulary\n\tprint(\"TRANSFERING VOCABULARY ...\")\n\t# loop through each token and add to the new database\n\tfor token in counts:\n\t\tnew_token_database.add_token(token[0], count=token[1])\n\t\t\n\t# create new vectors\n\tprint(\"GENERATING NEW VECTORS ...\")\n\tnew_token_database.generate_random_vectors()\n\t# save the new vectors\n\tnew_vectors = new_token_database.get_vectors()\n\t\n\t# copy over any existing vectors from previous version\n\tprint(\"TRANSFERING EXISTING VECTORS ...\")\n\t# loop through each token in the new database\n\tfor token, _ in counts:\n\t\t# check if it is in the old database as well\n\t\tif token in old_token_database:\n\t\t\t# if it is, copy over the token vector using the token ids\n\t\t\tnew_vectors[new_token_database.encode_token(token)] = old_vectors[old_token_database.encode_token(token)]\n\t\t\t\n\tprint(\"SAVING ...\")\t\t\n\t\n\t# update vectors\n\tnew_token_database.update_vectors(new_vectors)\n\t\n\t# save (set new flags to false because we have already set the correct versions before)\n\tnew_token_database.save(new_version=False, new_vectors_version=False)",
"def create_tfidf_vectors():\n vector_list = {}\n vector_magnitude = {}\n for file,tokens in tf.items():\n \n \"\"\"calculates raw tf-idf\n For a given dict of tokens we extract keys using tokens.keys()\n Using Lambda we calculate tf-idf for each token in the tokens dict\n and then return a key:value pair dict\n where key -> token name , value -> un normalized tf-idf and store in vector_list\"\"\"\n vector_list[file] = dict(map(lambda token : (token,(1+log10(tokens[token]))*getidf(token)) ,tokens.keys()))\n \n \"\"\"calculates file magnitude\n Form the calculated vector_list using vector_list[file].values() \n Using Lambda we calculate magnitude of the each document\n and then return a key:value pair dict\n where key -> file name , value -> magnitude of the file\"\"\"\n vector_magnitude[file] = (sqrt(sum(map(lambda value : value * value ,vector_list[file].values()))))\n \n tfidf_vectors[file] = Counter()\n \n #normalization of each token with respect document in which they are present\n for token in vector_list[file]:\n tfidf_vectors[file][token] = vector_list[file][token] / vector_magnitude[file]",
"def UnitVector(vector):\n mag = Magnitude(vector)\n mult = 1./mag\n x,y,z = vector\n return (x*mult, y*mult, z*mult)",
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorF_update(self, *args)",
"def print_term_weigth_vector(twv):\n #\n # Program something here\n #\n pass",
"def get_embedding_variance(fusion, verbose=True):\n\n # we calculate embedding with consistent # of dimensions so we can average\n # across them (if left to be \"random\" we get different #s)\n embeddings, results = zip(*[\n utils.dme(net, n_components=10, return_result=True) for net in fusion\n ])\n\n # align embeddings w/generalized Procrustes and average across embeddings\n realigned, xfms = align.iterative_alignment([e for e in embeddings])\n embedding = np.mean(realigned, axis=0)\n\n # normalize lambdas based on sum of lambdas from all components and check\n # variance explained by first five components\n if verbose:\n lambdas = [res['lambdas'] for res in results]\n varexp = [np.sum(var[:5] / var.sum()) for var in lambdas]\n mvar, sdvar = np.mean(varexp) * 100, np.std(varexp, ddof=1) * 100\n print('Variance explained by 5 components: '\n f'{mvar:.2f}% +/- {sdvar:.2f}%')\n\n return embedding, realigned",
"def create_f_vector(x, y, simplices, func_source, d=2, source_dict=dict()):\n triangles = mesh.all_triangles(simplices, x, y)\n f = np.zeros(d*x.size)\n for tri, simplex in zip(triangles, simplices):\n ind = get_global_indices(simplex, d)\n f_tri = func_source(tri, **source_dict)\n f[ind] += f_tri\n return f"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
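Written out, the fusion unit above concatenates the fusion vectors with the input, forms a candidate vector and a gate from that concatenation, and gates between the candidate and the original input:

```latex
s = [f_1; \dots; f_k; x], \qquad
r = \tanh(s W_r + b_r), \qquad
g = \sigma(s W_g + b_g), \qquad
o = g \odot r + (1 - g) \odot x
```

Here $x$ is `input_vector`, $f_1,\dots,f_k$ are the `fusion_vectors`, and $o$ is the returned tensor; $x$, $r$, $g$ and $o$ all have last dimension `input_dim`.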
Finds a shortest path from start to end using the provided edges | def shortest_path(edges, start, end):
# generate the graph
graph = {}
for edge in edges:
s = edge['start']
e = edge['end']
if s in graph:
graph[s].append(e)
else:
graph[s] = [e]
# build up a queue for BFS
path_total = []
# append initial node to the queue
# path_total.append([{'start':start}])
path_total.append([start])
# build up a set for recording if the node has been visited
visited = set()
    # record the current index
current_i = 0
while current_i < len(path_total):
# print(path_total, visited)
path = path_total[current_i]
# current_pos = path[-1]['end'] if len(path[-1]) > 1 else path[-1]['start']
current_pos = path[-1]
if current_pos not in visited and current_pos in graph:
visited.add(current_pos)
edges_from_current = graph[current_pos]
for source in edges_from_current:
if source in visited:
continue
else:
shortest = path.copy()
# shortest.append({'start':current_pos, 'end':source})
shortest.append(source)
# path.append(source)
# if reach the target node, return the path
if source == end:
# print(shortest)
result = convert(shortest)
# return shortest[1:]
return result
path_total.append(shortest)
current_i += 1
return None | [
"def shortest_path_no_lefts(edges, start, end):\n # build up the graph; the key is an edge, the value is a list of edges connecting to it\n graph = {}\n current_start_nodes = {}\n current_end_nodes = {}\n for edge in edges:\n s = edge['start']\n e = edge['end']\n # v1 = (e[0] - s[0], e[1] - s[1])\n if s in current_start_nodes:\n current_start_nodes[s].add((s, e))\n else:\n current_start_nodes[s] = set([(s, e)])\n if e in current_end_nodes:\n current_end_nodes[e].add((s, e))\n else:\n current_end_nodes[e] = set([(s, e)])\n \n if (s, e) not in graph:\n graph[(s, e)] = []\n if e in current_start_nodes:\n for s1, e1 in current_start_nodes[e]:\n graph[(s, e)].append((s1, e1))\n if s in current_end_nodes:\n for s0, e0 in current_end_nodes[s]:\n graph[(s0, e0)].append((s, e))\n# print(len(graph))\n \n # find the edges connecting to starting point\n start_trans = []\n for edge in graph:\n if edge[0] == start:\n start_trans.append(edge)\n# print(start_trans)\n \n # explore the graph from each edge conntecting to the starting node\n for i in start_trans:\n# print('xxxxxxxxxxxxxx')\n# print(i)\n path_total = []\n path_total.append([i])\n current_i = 0\n visited = set()\n while current_i < len(path_total):\n path = path_total[current_i]\n current_trans = path[-1]\n # obtain the vector representing the current edge\n v_current = (current_trans[1][0] - current_trans[0][0], current_trans[1][1] - current_trans[0][1])\n # if the current edge is not visited\n if current_trans not in visited and current_trans in graph:\n visited.add(current_trans)\n trans_from_current = graph[current_trans]\n # for each neighboring edges of current edge\n for edge in trans_from_current:\n if edge in visited:\n continue\n # if the edge is not visited, obtain the vector representing this edge\n v_edge = (edge[1][0] - edge[0][0], edge[1][1] - edge[0][1])\n # determine if the direction is valid\n d = direction(v_current, v_edge)\n if d == 'left' or d == 'U turn':\n continue\n shortest = path.copy()\n shortest.append(edge)\n if edge[1] == end:\n# print('yyyyyyyyyyyes')\n return [{'start':e[0],'end':e[1]} for e in shortest]\n path_total.append(shortest) \n current_i += 1\n \n return None",
"def shortest_path(graph, start, end):\n\n distances, predecessors = dijkstra(graph, start, end)\n path = []\n while 1:\n path.append(end)\n if end == start: break\n end = predecessors[end]\n path.reverse()\n return path",
"def nodes_and_edges_to_path(nodes, edges):\n out = []\n for i in range(len(nodes)-1):\n out.append(nodes[i])\n out.append(edges[i])\n else:\n out.append(nodes[-1])\n return out",
"def shortestPath(self, start, end):\n D, P = self.Dijkstra(start, end)\n Path = []\n while 1:\n Path.append(end)\n if end == start: break\n end = P[end]\n Path.reverse()\n return Path",
"def shortest_path(graph, start_node=1):\n search_queue = []\n start_vertex = graph.get_vertex(start_node)\n start_vertex.update_distance(0)\n search_queue.append(start_vertex)\n start_vertex.mark_explored()\n while len(search_queue) > 0:\n active_node = search_queue.pop(0)\n for edge in active_node.get_edges():\n linked_vertex = graph.get_vertex(edge)\n if not linked_vertex.get_nodes()[0][1]:\n linked_vertex.mark_explored()\n linked_vertex.update_distance(active_node.get_nodes()[0][2] + 1)\n search_queue.append(linked_vertex)",
"def _compute_all_shortest_paths(graph, source, target, exclude_edge=False):",
"def numShortestPaths(g, start, end):\n\n for node in [start, end]:\n # raises exception when any of the inputs are None\n if g is None or node is None:\n raise InvalidInputException(\"Invlaid input\")\n # raises exception when start or end nodes are not in g.\n if g.containsVertex(node) == False:\n \traise InvalidInputException(\"Invalid input\")\n\n for v in g.vertices():\n # sets distance of all vertices to a large value.\n v.distance = float('inf') \n\n start.distance = 0 # number of nodes away from start node.\n start.paths = 1 # number of paths there are from start node.\n\n order = []\n order.append(start)\n\n while order is not None:\n # BFS\n visited = order.pop(0)\n\n for edge in g.incidentEdges(visited):\n adjacent = g.opposite(visited, edge)\n order.append(adjacent)\n g.removeEdge(edge)\n\n if(adjacent.distance > visited.distance +1):\n # updates adjacent node's distance\n adjacent.distance = visited.distance +1\n # updates adjacent node's number of paths\n adjacent.paths = visited.paths\n\n elif(adjacent.distance == visited.distance +1):\n adjacent.paths += visited.paths\n\n # when end node is visited, returns the number of paths \n # stored in the end node.\n if(visited == end):\n return visited.paths",
"def all_shortest_paths(self,start_node, end_node):\n\n distance = self.min_dist(start_node,end_node)\n path = self.all_paths(start_node,end_node,distance,[])\n return path",
"def yens_shortest_paths(G, start, target, max_paths=10):\n letters = list(string.ascii_letters)\n shortestPaths = []\n k = 0\n try:\n paths = list(itertools.islice(nx.shortest_simple_paths(G, start, target), max_paths))\n except Exception:\n raise PyeMapShortestPathException(\"No paths between \" + str(start) + \" and \" + str(target) + \" were found.\")\n for k in range(0, len(paths)):\n path = paths[k]\n sum = 0\n weights = []\n for i in range(0, len(path) - 1): # sum up edge weights\n sum += (G[path[i]][path[i + 1]]['weight'])\n weights.append(G[path[i]][path[i + 1]]['weight'])\n path = ShortestPath(path, weights, sum)\n shortestPaths.append(path)\n shortestPaths = sorted(shortestPaths)\n for i in range(0, len(shortestPaths)):\n path = shortestPaths[i].path\n if i == 0: # shortest path gets bolder edges\n for j in range(len(path) - 1):\n G[path[j]][path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['style'] = 'solid'\n G.nodes[path[j]]['penwidth'] = 6.0\n G.nodes[path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['color'] = '#778899FF'\n # make the nodes look opaque if they are connected to the source\n if len(G.nodes[path[j]]['fillcolor']) != 9:\n G.nodes[path[j]]['fillcolor'] += 'FF'\n G.nodes[path[j]]['color'] = '#708090FF'\n if len(G.nodes[path[j + 1]]['fillcolor']) != 9:\n G.nodes[path[j + 1]]['fillcolor'] += 'FF'\n G.nodes[path[j + 1]]['color'] = '#708090FF'\n else:\n for j in range(len(path) - 1):\n G[path[j]][path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['style'] = 'solid'\n G.nodes[path[j]]['penwidth'] = 6.0\n G.nodes[path[j + 1]]['penwidth'] = 6.0\n if G[path[j]][path[j + 1]]['color'] != '#778899FF':\n G[path[j]][path[j + 1]]['color'] = '#7788997F'\n # make the nodes look opaque if they are connected to the source\n if len(G.nodes[path[j]]['fillcolor']) != 9:\n G.nodes[path[j]]['fillcolor'] += '7F'\n G.nodes[path[j]]['color'] = '#7080907F'\n if len(G.nodes[path[j + 1]]['fillcolor']) != 9:\n G.nodes[path[j + 1]]['fillcolor'] += '7F'\n G.nodes[path[j + 1]]['color'] = '#7080907F'\n shortestPaths[i].set_id(\"1\" + letters[i])\n br = Branch(1, shortestPaths[0].path[-1])\n for pt in shortestPaths:\n br.add_path(pt)\n return [br]",
"def shortest_path(self):\n\n start = [v for v in self.nodes if self.nodes[v].get('start', False)]\n if len(start) != 1: raise ValueError(\"graph does not have exactly one start node\")\n frontier = collections.deque(start)\n pred = {start[0]: None}\n while len(frontier) > 0:\n u = frontier.popleft()\n if self.nodes[u].get('accept', False):\n path = []\n while u is not None:\n path.append(u)\n u = pred[u]\n path.reverse()\n return path\n for v in self.edges.get(u, ()):\n if v not in pred:\n frontier.append(v)\n pred[v] = u\n raise ValueError(\"graph does not have an accepting path\")",
"def find_shortest_path(graph):\n if len(graph) == 0:\n return\n source = min(graph.nodes())\n end = max(graph.nodes())\n return nx.dijkstra_path(graph, source=source, target=end)",
"def shortest_path(self, source, target):\n if source not in self.d or target not in self.d:\n raise KeyError(\"one or both nodes are not in Graph\")\n path = dict()\n visited = []\n queue = [source]\n marked = {source}\n #include endpoints in FINAL_PATH\n FINAL_PATH = []\n currentNode = source\n\n #### Perform the search\n while currentNode is not target:\n currentNode = queue.pop(0)\n neighborNodeSet = self.d.get(currentNode)\n #add neighbors of currentNodvisited.append(currentNode)e to Queue if they aren't in marked,\n #add the neighbors of currentNode to the marked list if they aren't in it already\n # pdb.set_trace()\n for i in neighborNodeSet:\n if i not in marked:\n marked.add(i)\n queue.append(i)\n #when a element goes into marked, add it to the path dictionary backwards\n # I could use add attribute for the path dictionary, not update, if I don't want it to overwrite which node arrived first to a given node\n path.update({i:currentNode})\n\n #add currentNode to visited and reset currentNode\n visited.append(currentNode)\n # print(path)\n\n #### Evaluate the search using the dictionary\n currentNode = target\n while True:\n if currentNode == source:\n FINAL_PATH.append(currentNode)\n break\n FINAL_PATH.append(currentNode)\n currentNode = path.get(currentNode)\n\n #reverse the path\n return FINAL_PATH[::-1]",
"def dijkstra(self,source:int,end:int,delivery_path:tuple) -> tuple:\r\n delivery_path_used = False\r\n self.vertices[source].cost = 0\r\n discovered = MinHeap(self.vertices) #create MinHeap and add all vertices into MinHeap\r\n discovered.rise(discovered.indx[source]) #rise the source vertex\r\n while (len(discovered)) > 0 :\r\n u = discovered.serve() \r\n\r\n if u.id == end: #reached our end, terminate early\r\n path,delivery_path_used = self.get_path(source,end,delivery_path) #backtrack to get path \r\n return (u.cost,path,delivery_path_used)\r\n\r\n u.visited = True\r\n for edge in u.edges: #edge relaxation\r\n v = self.vertices[edge.v]\r\n if v.discovered == False:\r\n v.discovered = True\r\n v.cost = u.cost + edge.w\r\n v.previous = u\r\n position = discovered.indx[v.id] #calculate positon of vertex v in heap\r\n discovered.rise(position)\r\n elif v.visited == False:\r\n if v.cost > u.cost + edge.w:\r\n v.cost = u.cost + edge.w\r\n v.previous = u\r\n position = discovered.indx[v.id] #calculate positon of vertex v in heap\r\n discovered.rise(position) \r\n \r\n path,delivery_path_used = self.get_path(source,end,delivery_path) #backtrack to get path\r\n return (self.vertices[end].cost, path, delivery_path_used)",
"def shortest_unweighted_path(graph, start, end):\n\n from queue import Queue\n\n node_distance = Queue()\n node_distance.put((start, 0))\n visited = set()\n\n while not node_distance.empty():\n current_node, current_distance = node_distance.get()\n\n if current_node in visited:\n continue\n\n visited.add(current_node)\n\n if current_node == end:\n return current_distance\n\n for new_node in graph[current_node]:\n node_distance.put((new_node, current_distance + 1))\n\n return -1",
"def get_path_between_nodes(graph, start, end, path=None):\n if path is None:\n path = []\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n for node in graph[start]:\n if node not in path:\n new_path = get_path_between_nodes(graph, node, end, path)\n if new_path:\n return new_path\n return None",
"def shortest_path(start, end):\n\n moves = rubik.quarter_twists\n\n parentS = {}\n parentE = {}\n parentS[start] = None\n parentE[end] = None\n\n start_current_positions = set()\n end_current_positions = set()\n start_current_positions.add(start)\n end_current_positions.add(end)\n\n if end in parentS:\n return get_moves(parentS, parentE, end)\n\n for i in range(7):\n start_next_positions = set()\n for position in start_current_positions:\n for move in moves:\n next_position = rubik.perm_apply(move, position)\n if next_position not in parentS:\n parentS[next_position] = (position, move)\n start_next_positions.add(next_position)\n if next_position in parentE:\n return get_moves(parentS,\n parentE,\n next_position)\n \n start_current_positions = start_next_positions\n end_next_positions = set()\n for position in end_current_positions:\n for move in moves:\n next_position = rubik.perm_apply(move, position)\n if next_position not in parentE:\n parentE[next_position] = (position, move)\n end_next_positions.add(next_position)\n if next_position in parentS:\n return get_moves(parentS,\n parentE,\n next_position)\n\n end_current_positions = end_next_positions\n\n return None",
"def shortest_path_k_lefts(edges, start, end, k):\n # build up the graph; the key is an edge, the value is a list of edges connecting to it\n graph = {}\n current_start_nodes = {}\n current_end_nodes = {}\n for edge in edges:\n s = edge['start']\n e = edge['end']\n # v1 = (e[0] - s[0], e[1] - s[1])\n if s in current_start_nodes:\n current_start_nodes[s].add((s, e))\n else:\n current_start_nodes[s] = set([(s, e)])\n if e in current_end_nodes:\n current_end_nodes[e].add((s, e))\n else:\n current_end_nodes[e] = set([(s, e)])\n \n if (s, e) not in graph:\n graph[(s, e)] = []\n if e in current_start_nodes:\n for s1, e1 in current_start_nodes[e]:\n graph[(s, e)].append((s1, e1))\n if s in current_end_nodes:\n for s0, e0 in current_end_nodes[s]:\n graph[(s0, e0)].append((s, e))\n# print(len(graph))\n \n start_trans = []\n for edge in graph:\n if edge[0] == start:\n start_trans.append((edge, k))\n# print(start_trans)\n \n # initialize the shortest path and its length\n current_shortest = None\n current_len = float('inf')\n for start_edge in start_trans:\n# print('xxxxxxxxxxxxxx')\n# print(i)\n path_total = [[start_edge]]\n current_i = 0\n visited = {}\n while current_i < len(path_total):\n path = path_total[current_i]\n current_trans, current_k = path[-1]\n# print('current')\n# print(current_trans)\n v_current = (current_trans[1][0] - current_trans[0][0], current_trans[1][1] - current_trans[0][1])\n # if the edge is not visited, condition is true\n if current_trans not in visited:\n cond = True\n # if the edge is visited but reaching it with less left turns, contidion is true\n elif visited[current_trans] < current_k:\n cond = True\n else:\n cond = False\n \n # if condition is true\n if cond and current_trans in graph:\n visited[current_trans] = current_k\n # visited_set.add(path[-1])\n trans_from_current = graph[current_trans]\n for edge in trans_from_current:\n # record how many remaining left turns \n new_k = current_k\n# print('edge')\n# print(edge)\n# print('x' + 1)\n # if edge in visited:\n # continue\n v_edge = (edge[1][0] - edge[0][0], edge[1][1] - edge[0][1])\n d = direction(v_current, v_edge)\n if d == 'U turn':\n continue\n \n # if the direction is left, see if there is remaining left turns, if yes, turn left\n if d == 'left':\n if current_k < 1:\n continue\n else:\n new_k -= 1\n shortest = path.copy()\n shortest.append((edge, new_k))\n if edge[1] == end:\n# print('yyyyyyyyyyyes')\n # print([{'start':e[0][0],'end':e[0][1]} for e in shortest])\n if len(shortest) < current_len:\n current_len = len(shortest)\n current_shortest = shortest\n # return [{'start':e[0][0],'end':e[0][1]} for e in shortest]\n path_total.append(shortest) \n current_i += 1\n \n if current_shortest != None:\n return [{'start':e[0][0],'end':e[0][1]} for e in current_shortest] \n else:\n return",
"def shortest_path(map: Map, start: int, goal: int) -> list:\n paths = list()\n path_goal_min_val = float('inf')\n path_goal_min = None\n\n # Check if already in goal\n if start == goal:\n return [start]\n\n # Initialize paths\n goal_initial_distance = estimated_distance(path_frontier_point=map.intersections[start],\n goal_point=map.intersections[goal])\n path = Path(Cost(goal_initial_distance, 0, goal_initial_distance), [start], start, start)\n heapq.heappush(paths, path)\n\n while len(paths) >= 1:\n nearest_frontier_path = heapq.heappop(paths)\n for neighbor_road in map.roads[nearest_frontier_path.frontier]:\n\n if neighbor_road == nearest_frontier_path.previous: # Avoid returning to backwards\n continue\n else: # Continue\n\n new_path = update_path(map=map, path=nearest_frontier_path, new_frontier=neighbor_road, goal=goal)\n\n if neighbor_road == goal: # Reached destination with a path\n if new_path.cost.total < path_goal_min_val: # Better than previous path\n path_goal_min_val = new_path.cost.total\n path_goal_min = new_path.intersections\n else: # Reached destination, with higher cost -> disregard\n pass\n else:\n if path_goal_min is not None: # Already found the goal with a path\n if new_path.cost.total >= path_goal_min_val: # Path not reached goal and already costly\n pass\n else: # Cheaper path, keep exploring\n heapq.heappush(paths, new_path)\n else: # Not yet found the goal, keep exploring\n heapq.heappush(paths, new_path)\n\n if path_goal_min is not None:\n return path_goal_min\n else:\n return -1",
"def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors):\n #TODO\n paths = findAllPaths(digraph, Node(start), Node(end), maxTotalDist, maxDistOutdoors,path=[],paths=[],shortestD = 1e20)\n if paths == []:\n raise ValueError\n else:\n #选择best_path\n x = [calPathDistance(digraph,path)[0] for path in paths]\n bestpath = paths[x.index(min(x))]\n for i in range(len(bestpath)):\n bestpath[i] = str(bestpath[i])\n return bestpath"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
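`shortest_path` above ends by returning `convert(shortest)`, and the no-left-turn variant listed among its negatives filters moves with `direction(...)`, but neither helper is defined in this section. A plausible reconstruction of both, assuming `convert` turns the BFS node list back into `{'start', 'end'}` edge dicts and `direction` classifies the turn from the 2-D cross and dot products (the labels 'left' and 'U turn' come from how the calling code checks the result; 'right', 'straight', and the sign convention for 'left' are assumptions):

```python
# Hedged reconstructions of helpers the code above calls but does not
# define here; names and behavior are inferred, not taken from the
# original source.
def convert(nodes):
    # Turn a node path [n0, n1, ..., nk] into the edge-dict format
    # used by the input, e.g. {'start': n0, 'end': n1}.
    return [{'start': a, 'end': b} for a, b in zip(nodes, nodes[1:])]

def direction(v1, v2):
    # Classify the turn taken when travelling along v1 and then v2.
    # Assumes standard x-right / y-up axes; with screen coordinates
    # (y down) the 'left'/'right' cases would swap.
    cross = v1[0] * v2[1] - v1[1] * v2[0]
    dot = v1[0] * v2[0] + v1[1] * v2[1]
    if cross > 0:
        return 'left'
    if cross < 0:
        return 'right'
    return 'straight' if dot > 0 else 'U turn'
```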
Finds a shortest path without any left turns that goes from start to end using the provided edges. (reversing turns are also not allowed) | def shortest_path_no_lefts(edges, start, end):
# build up the graph; the key is an edge, the value is a list of edges connecting to it
graph = {}
current_start_nodes = {}
current_end_nodes = {}
for edge in edges:
s = edge['start']
e = edge['end']
# v1 = (e[0] - s[0], e[1] - s[1])
if s in current_start_nodes:
current_start_nodes[s].add((s, e))
else:
current_start_nodes[s] = set([(s, e)])
if e in current_end_nodes:
current_end_nodes[e].add((s, e))
else:
current_end_nodes[e] = set([(s, e)])
if (s, e) not in graph:
graph[(s, e)] = []
if e in current_start_nodes:
for s1, e1 in current_start_nodes[e]:
graph[(s, e)].append((s1, e1))
if s in current_end_nodes:
for s0, e0 in current_end_nodes[s]:
graph[(s0, e0)].append((s, e))
# print(len(graph))
# find the edges connecting to starting point
start_trans = []
for edge in graph:
if edge[0] == start:
start_trans.append(edge)
# print(start_trans)
    # explore the graph from each edge connecting to the starting node
for i in start_trans:
# print('xxxxxxxxxxxxxx')
# print(i)
path_total = []
path_total.append([i])
current_i = 0
visited = set()
while current_i < len(path_total):
path = path_total[current_i]
current_trans = path[-1]
# obtain the vector representing the current edge
v_current = (current_trans[1][0] - current_trans[0][0], current_trans[1][1] - current_trans[0][1])
# if the current edge is not visited
if current_trans not in visited and current_trans in graph:
visited.add(current_trans)
trans_from_current = graph[current_trans]
                # for each neighboring edge of the current edge
for edge in trans_from_current:
if edge in visited:
continue
# if the edge is not visited, obtain the vector representing this edge
v_edge = (edge[1][0] - edge[0][0], edge[1][1] - edge[0][1])
# determine if the direction is valid
d = direction(v_current, v_edge)
if d == 'left' or d == 'U turn':
continue
shortest = path.copy()
shortest.append(edge)
if edge[1] == end:
# print('yyyyyyyyyyyes')
return [{'start':e[0],'end':e[1]} for e in shortest]
path_total.append(shortest)
current_i += 1
return None | [
"def shortest_path(edges, start, end):\n # generate the graph\n graph = {}\n for edge in edges:\n s = edge['start']\n e = edge['end']\n if s in graph:\n graph[s].append(e)\n else:\n graph[s] = [e]\n # build up a queue for BFS\n path_total = []\n # append initial node to the queue\n# path_total.append([{'start':start}])\n path_total.append([start])\n # build up a set for recording if the node has been visited\n visited = set()\n # record the curent index\n current_i = 0\n while current_i < len(path_total):\n# print(path_total, visited)\n path = path_total[current_i] \n# current_pos = path[-1]['end'] if len(path[-1]) > 1 else path[-1]['start']\n current_pos = path[-1]\n if current_pos not in visited and current_pos in graph:\n visited.add(current_pos)\n edges_from_current = graph[current_pos]\n for source in edges_from_current:\n if source in visited:\n continue\n else:\n shortest = path.copy()\n# shortest.append({'start':current_pos, 'end':source})\n shortest.append(source)\n# path.append(source)\n # if reach the target node, return the path\n if source == end:\n# print(shortest)\n result = convert(shortest)\n# return shortest[1:]\n return result\n path_total.append(shortest)\n current_i += 1\n \n return None",
"def nodes_and_edges_to_path(nodes, edges):\n out = []\n for i in range(len(nodes)-1):\n out.append(nodes[i])\n out.append(edges[i])\n else:\n out.append(nodes[-1])\n return out",
"def shortest_path(graph, start, end):\n\n distances, predecessors = dijkstra(graph, start, end)\n path = []\n while 1:\n path.append(end)\n if end == start: break\n end = predecessors[end]\n path.reverse()\n return path",
"def shortest_path_k_lefts(edges, start, end, k):\n # build up the graph; the key is an edge, the value is a list of edges connecting to it\n graph = {}\n current_start_nodes = {}\n current_end_nodes = {}\n for edge in edges:\n s = edge['start']\n e = edge['end']\n # v1 = (e[0] - s[0], e[1] - s[1])\n if s in current_start_nodes:\n current_start_nodes[s].add((s, e))\n else:\n current_start_nodes[s] = set([(s, e)])\n if e in current_end_nodes:\n current_end_nodes[e].add((s, e))\n else:\n current_end_nodes[e] = set([(s, e)])\n \n if (s, e) not in graph:\n graph[(s, e)] = []\n if e in current_start_nodes:\n for s1, e1 in current_start_nodes[e]:\n graph[(s, e)].append((s1, e1))\n if s in current_end_nodes:\n for s0, e0 in current_end_nodes[s]:\n graph[(s0, e0)].append((s, e))\n# print(len(graph))\n \n start_trans = []\n for edge in graph:\n if edge[0] == start:\n start_trans.append((edge, k))\n# print(start_trans)\n \n # initialize the shortest path and its length\n current_shortest = None\n current_len = float('inf')\n for start_edge in start_trans:\n# print('xxxxxxxxxxxxxx')\n# print(i)\n path_total = [[start_edge]]\n current_i = 0\n visited = {}\n while current_i < len(path_total):\n path = path_total[current_i]\n current_trans, current_k = path[-1]\n# print('current')\n# print(current_trans)\n v_current = (current_trans[1][0] - current_trans[0][0], current_trans[1][1] - current_trans[0][1])\n # if the edge is not visited, condition is true\n if current_trans not in visited:\n cond = True\n # if the edge is visited but reaching it with less left turns, contidion is true\n elif visited[current_trans] < current_k:\n cond = True\n else:\n cond = False\n \n # if condition is true\n if cond and current_trans in graph:\n visited[current_trans] = current_k\n # visited_set.add(path[-1])\n trans_from_current = graph[current_trans]\n for edge in trans_from_current:\n # record how many remaining left turns \n new_k = current_k\n# print('edge')\n# print(edge)\n# print('x' + 1)\n # if edge in visited:\n # continue\n v_edge = (edge[1][0] - edge[0][0], edge[1][1] - edge[0][1])\n d = direction(v_current, v_edge)\n if d == 'U turn':\n continue\n \n # if the direction is left, see if there is remaining left turns, if yes, turn left\n if d == 'left':\n if current_k < 1:\n continue\n else:\n new_k -= 1\n shortest = path.copy()\n shortest.append((edge, new_k))\n if edge[1] == end:\n# print('yyyyyyyyyyyes')\n # print([{'start':e[0][0],'end':e[0][1]} for e in shortest])\n if len(shortest) < current_len:\n current_len = len(shortest)\n current_shortest = shortest\n # return [{'start':e[0][0],'end':e[0][1]} for e in shortest]\n path_total.append(shortest) \n current_i += 1\n \n if current_shortest != None:\n return [{'start':e[0][0],'end':e[0][1]} for e in current_shortest] \n else:\n return",
"def _compute_all_shortest_paths(graph, source, target, exclude_edge=False):",
"def spanning_edges(G, start):\n visited = set()\n todo = [(start, None)]\n span = set()\n\n while todo:\n (cur, e) = todo.pop()\n\n if cur in visited: continue\n\n visited.add(cur)\n if e: span.add(e)\n\n for n in G.adj_to(cur):\n if n not in visited:\n todo.append((n, (cur, n)))\n\n return span",
"def shortest_path(graph, start_node=1):\n search_queue = []\n start_vertex = graph.get_vertex(start_node)\n start_vertex.update_distance(0)\n search_queue.append(start_vertex)\n start_vertex.mark_explored()\n while len(search_queue) > 0:\n active_node = search_queue.pop(0)\n for edge in active_node.get_edges():\n linked_vertex = graph.get_vertex(edge)\n if not linked_vertex.get_nodes()[0][1]:\n linked_vertex.mark_explored()\n linked_vertex.update_distance(active_node.get_nodes()[0][2] + 1)\n search_queue.append(linked_vertex)",
"def shortest_unweighted_path(graph, start, end):\n\n from queue import Queue\n\n node_distance = Queue()\n node_distance.put((start, 0))\n visited = set()\n\n while not node_distance.empty():\n current_node, current_distance = node_distance.get()\n\n if current_node in visited:\n continue\n\n visited.add(current_node)\n\n if current_node == end:\n return current_distance\n\n for new_node in graph[current_node]:\n node_distance.put((new_node, current_distance + 1))\n\n return -1",
"def shortestPath(self, start, end):\n D, P = self.Dijkstra(start, end)\n Path = []\n while 1:\n Path.append(end)\n if end == start: break\n end = P[end]\n Path.reverse()\n return Path",
"def shortest_path(self):\n\n start = [v for v in self.nodes if self.nodes[v].get('start', False)]\n if len(start) != 1: raise ValueError(\"graph does not have exactly one start node\")\n frontier = collections.deque(start)\n pred = {start[0]: None}\n while len(frontier) > 0:\n u = frontier.popleft()\n if self.nodes[u].get('accept', False):\n path = []\n while u is not None:\n path.append(u)\n u = pred[u]\n path.reverse()\n return path\n for v in self.edges.get(u, ()):\n if v not in pred:\n frontier.append(v)\n pred[v] = u\n raise ValueError(\"graph does not have an accepting path\")",
"def dropEdge(graph={}, startnode='',endnode=''):\r\n graph1 = graph\r\n graph2 = {startnode:[endnode]}\r\n return differenceGraphs(graph1, graph2)",
"def all_simple_paths(graph, start_node, end_node):\n return set(_gen(graph, start_node, end_node))",
"def numShortestPaths(g, start, end):\n\n for node in [start, end]:\n # raises exception when any of the inputs are None\n if g is None or node is None:\n raise InvalidInputException(\"Invlaid input\")\n # raises exception when start or end nodes are not in g.\n if g.containsVertex(node) == False:\n \traise InvalidInputException(\"Invalid input\")\n\n for v in g.vertices():\n # sets distance of all vertices to a large value.\n v.distance = float('inf') \n\n start.distance = 0 # number of nodes away from start node.\n start.paths = 1 # number of paths there are from start node.\n\n order = []\n order.append(start)\n\n while order is not None:\n # BFS\n visited = order.pop(0)\n\n for edge in g.incidentEdges(visited):\n adjacent = g.opposite(visited, edge)\n order.append(adjacent)\n g.removeEdge(edge)\n\n if(adjacent.distance > visited.distance +1):\n # updates adjacent node's distance\n adjacent.distance = visited.distance +1\n # updates adjacent node's number of paths\n adjacent.paths = visited.paths\n\n elif(adjacent.distance == visited.distance +1):\n adjacent.paths += visited.paths\n\n # when end node is visited, returns the number of paths \n # stored in the end node.\n if(visited == end):\n return visited.paths",
"def shortest_path(start, end):\n\n moves = rubik.quarter_twists\n\n parentS = {}\n parentE = {}\n parentS[start] = None\n parentE[end] = None\n\n start_current_positions = set()\n end_current_positions = set()\n start_current_positions.add(start)\n end_current_positions.add(end)\n\n if end in parentS:\n return get_moves(parentS, parentE, end)\n\n for i in range(7):\n start_next_positions = set()\n for position in start_current_positions:\n for move in moves:\n next_position = rubik.perm_apply(move, position)\n if next_position not in parentS:\n parentS[next_position] = (position, move)\n start_next_positions.add(next_position)\n if next_position in parentE:\n return get_moves(parentS,\n parentE,\n next_position)\n \n start_current_positions = start_next_positions\n end_next_positions = set()\n for position in end_current_positions:\n for move in moves:\n next_position = rubik.perm_apply(move, position)\n if next_position not in parentE:\n parentE[next_position] = (position, move)\n end_next_positions.add(next_position)\n if next_position in parentS:\n return get_moves(parentS,\n parentE,\n next_position)\n\n end_current_positions = end_next_positions\n\n return None",
"def traverse_graph(G, start, nextn):\n start_coords = array([G.node[start]['x'], G.node[start]['y']])\n nodes_visited = [start]\n nodes_visited_set = set()\n edges_visited = []\n coords = [start_coords]\n\n prev = start\n cur = nextn\n\n while cur != start:\n cur_coords = array([G.node[cur]['x'], G.node[cur]['y']])\n # We ignore all neighbors we alreay visited to avoid multiple loops\n\n neighs = [n for n in G.neighbors(cur) if n != prev and n != cur]\n\n edges_visited.append((prev, cur))\n nodes_visited.append(cur)\n coords.append(cur_coords)\n\n n_neighs = len(neighs)\n if n_neighs > 1:\n # Choose path that keeps the loop closest on the left hand side\n prev_coords = array([G.node[prev]['x'], G.node[prev]['y']])\n neigh_coords = array([[G.node[n]['x'], G.node[n]['y']] \\\n for n in neighs])\n\n ## Construct vectors and normalize\n u = cur_coords - prev_coords\n vs = neigh_coords - cur_coords\n\n # calculate cos and sin between direction vector and neighbors\n u /= sqrt((u*u).sum(-1))\n vs /= sqrt((vs*vs).sum(-1))[...,newaxis]\n\n coss = dot(u, vs.T)\n sins = cross(u, vs)\n\n # this is a function between -2 and +2, where the\n # leftmost path corresponds to -2, rightmost to +2\n # sgn(alpha)(cos(alpha) - 1)\n ranked = sign(sins)*(coss - 1.)\n\n prev = cur\n cur = neighs[argmin(ranked)]\n else:\n # No choice to make\n prev = cur\n cur = neighs[0]\n\n # Remove pathological protruding loops\n if prev in nodes_visited_set:\n n_ind = nodes_visited.index(prev)\n\n del nodes_visited[n_ind+1:]\n del coords[n_ind+1:]\n del edges_visited[n_ind:]\n\n nodes_visited_set.add(prev)\n\n edges_visited.append((nodes_visited[-1], nodes_visited[0]))\n\n return nodes_visited, edges_visited, array(coords)",
"def cheapest_path(start, stop, exits):\n\n todo = [_WorkItem(0, start, None)]\n heapq.heapify(todo)\n visited = set()\n while todo:\n item = heapq.heappop(todo)\n here = item.vertex\n if here == stop:\n return Path.unravel(start, item.path_so_far)\n if here in visited:\n continue\n visited.add(here)\n for edge in exits(here):\n there = edge.destination\n if there in visited:\n continue\n dt = getattr(edge, 'cost', 1)\n heapq.heappush(todo, _WorkItem(item.cost_so_far + dt, there, (item.path_so_far, edge)))\n return None",
"def findPathsDown(graph={}, startnode='', operator='==', pathlength=None): \r\n pathsDownGraph ={}\r\n # Define operators that can be used for pathlength comparisons\r\n operatorDict = {'<': '<', '<=': '<=',\r\n '>' : '>', '>=': '>=',\r\n '==': '==', '!=': '!=',\r\n '<>' : '<>'}\r\n # Check that only valid operators are input by user\r\n if not operatorDict.has_key(operator):\r\n return \"Operator must be one of -- '<', '<=', '>', '>=', '==', '!=', '<>'\"\r\n if pathlength <> None:\r\n # Check that pathlength is an integer\r\n try:\r\n int(pathlength)\r\n except ValueError:\r\n return \"Pathlength must be an integer\"\r\n # Create a list of unique endnodes (for iteration and will become keys in output graph)\r\n endnodesList = graph.values()\r\n uniqueendnodesList = [ ]\r\n for nodeList in endnodesList:\r\n for node in nodeList:\r\n if node not in uniqueendnodesList:\r\n uniqueendnodesList.append(node)\r\n # For each unique endnode find all paths between the startnode and endnodes\r\n for endnode in uniqueendnodesList:\r\n # Ignore case where endnode == startnode -- path to self\r\n if endnode == startnode:\r\n continue\r\n pathsList = findAllPathsAsLists(graph, startnode, endnode)\r\n # Check that resulting list of paths is not empty\r\n if len (pathsList) <> 0:\r\n # If pathlength == None -- no Filter is applied and path entered\r\n # in output graph for key(startnode)\r\n if pathlength == None:\r\n pathsDownGraph[endnode] = pathsList\r\n else:\r\n # apply the user input operator and pathlength info to filter paths\r\n validpathsList = [path for path in\r\n pathsList if\r\n eval('len(path)' + operatorDict[operator] + 'pathlength')]\r\n # Check that remaining valid paths results in a non-empty list.\r\n if len(validpathsList) > 0:\r\n # Apply valid non-empty paths to output graph for key (startnode)\r\n pathsDownGraph[endnode] = validpathsList\r\n else:\r\n continue\r\n else:\r\n continue\r\n return pathsDownGraph",
"def strongly_connected_components_path(vertices, edges):\n identified = set()\n stack = []\n index = {}\n boundaries = []\n\n def dfs(v):\n index[v] = len(stack)\n stack.append(v)\n boundaries.append(index[v])\n\n for w in edges[v]:\n if w not in index:\n # For Python >= 3.3, replace with \"yield from dfs(w)\"\n for scc in dfs(w):\n yield scc\n elif w not in identified:\n while index[w] < boundaries[-1]:\n boundaries.pop()\n\n if boundaries[-1] == index[v]:\n boundaries.pop()\n scc = set(stack[index[v]:])\n del stack[index[v]:]\n identified.update(scc)\n yield scc\n\n for v in vertices:\n if v not in index:\n # For Python >= 3.3, replace with \"yield from dfs(v)\"\n for scc in dfs(v):\n yield scc",
"def get_path_between_nodes(graph, start, end, path=None):\n if path is None:\n path = []\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n for node in graph[start]:\n if node not in path:\n new_path = get_path_between_nodes(graph, node, end, path)\n if new_path:\n return new_path\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds a shortest path with no more than k left turns that goes from start to end using the provided edges. (reversing turns are also not allowed) | def shortest_path_k_lefts(edges, start, end, k):
# build up the graph; the key is an edge, the value is a list of edges connecting to it
graph = {}
current_start_nodes = {}
current_end_nodes = {}
for edge in edges:
s = edge['start']
e = edge['end']
# v1 = (e[0] - s[0], e[1] - s[1])
if s in current_start_nodes:
current_start_nodes[s].add((s, e))
else:
current_start_nodes[s] = set([(s, e)])
if e in current_end_nodes:
current_end_nodes[e].add((s, e))
else:
current_end_nodes[e] = set([(s, e)])
if (s, e) not in graph:
graph[(s, e)] = []
if e in current_start_nodes:
for s1, e1 in current_start_nodes[e]:
graph[(s, e)].append((s1, e1))
if s in current_end_nodes:
for s0, e0 in current_end_nodes[s]:
graph[(s0, e0)].append((s, e))
# print(len(graph))
start_trans = []
for edge in graph:
if edge[0] == start:
start_trans.append((edge, k))
# print(start_trans)
# initialize the shortest path and its length
current_shortest = None
current_len = float('inf')
for start_edge in start_trans:
# print('xxxxxxxxxxxxxx')
# print(i)
path_total = [[start_edge]]
current_i = 0
visited = {}
while current_i < len(path_total):
path = path_total[current_i]
current_trans, current_k = path[-1]
# print('current')
# print(current_trans)
v_current = (current_trans[1][0] - current_trans[0][0], current_trans[1][1] - current_trans[0][1])
# if the edge is not visited, condition is true
if current_trans not in visited:
cond = True
            # if the edge has been visited but is now reached with more left turns remaining, the condition is true
elif visited[current_trans] < current_k:
cond = True
else:
cond = False
# if condition is true
if cond and current_trans in graph:
visited[current_trans] = current_k
# visited_set.add(path[-1])
trans_from_current = graph[current_trans]
for edge in trans_from_current:
                    # track how many left turns remain after this transition
new_k = current_k
# print('edge')
# print(edge)
# print('x' + 1)
# if edge in visited:
# continue
v_edge = (edge[1][0] - edge[0][0], edge[1][1] - edge[0][1])
d = direction(v_current, v_edge)
if d == 'U turn':
continue
                    # if the direction is left, check whether any left turns remain; if so, spend one
if d == 'left':
if current_k < 1:
continue
else:
new_k -= 1
shortest = path.copy()
shortest.append((edge, new_k))
if edge[1] == end:
# print('yyyyyyyyyyyes')
# print([{'start':e[0][0],'end':e[0][1]} for e in shortest])
if len(shortest) < current_len:
current_len = len(shortest)
current_shortest = shortest
# return [{'start':e[0][0],'end':e[0][1]} for e in shortest]
path_total.append(shortest)
current_i += 1
    if current_shortest is not None:
return [{'start':e[0][0],'end':e[0][1]} for e in current_shortest]
else:
return | [
"def shortest_path_no_lefts(edges, start, end):\n # build up the graph; the key is an edge, the value is a list of edges connecting to it\n graph = {}\n current_start_nodes = {}\n current_end_nodes = {}\n for edge in edges:\n s = edge['start']\n e = edge['end']\n # v1 = (e[0] - s[0], e[1] - s[1])\n if s in current_start_nodes:\n current_start_nodes[s].add((s, e))\n else:\n current_start_nodes[s] = set([(s, e)])\n if e in current_end_nodes:\n current_end_nodes[e].add((s, e))\n else:\n current_end_nodes[e] = set([(s, e)])\n \n if (s, e) not in graph:\n graph[(s, e)] = []\n if e in current_start_nodes:\n for s1, e1 in current_start_nodes[e]:\n graph[(s, e)].append((s1, e1))\n if s in current_end_nodes:\n for s0, e0 in current_end_nodes[s]:\n graph[(s0, e0)].append((s, e))\n# print(len(graph))\n \n # find the edges connecting to starting point\n start_trans = []\n for edge in graph:\n if edge[0] == start:\n start_trans.append(edge)\n# print(start_trans)\n \n # explore the graph from each edge conntecting to the starting node\n for i in start_trans:\n# print('xxxxxxxxxxxxxx')\n# print(i)\n path_total = []\n path_total.append([i])\n current_i = 0\n visited = set()\n while current_i < len(path_total):\n path = path_total[current_i]\n current_trans = path[-1]\n # obtain the vector representing the current edge\n v_current = (current_trans[1][0] - current_trans[0][0], current_trans[1][1] - current_trans[0][1])\n # if the current edge is not visited\n if current_trans not in visited and current_trans in graph:\n visited.add(current_trans)\n trans_from_current = graph[current_trans]\n # for each neighboring edges of current edge\n for edge in trans_from_current:\n if edge in visited:\n continue\n # if the edge is not visited, obtain the vector representing this edge\n v_edge = (edge[1][0] - edge[0][0], edge[1][1] - edge[0][1])\n # determine if the direction is valid\n d = direction(v_current, v_edge)\n if d == 'left' or d == 'U turn':\n continue\n shortest = path.copy()\n shortest.append(edge)\n if edge[1] == end:\n# print('yyyyyyyyyyyes')\n return [{'start':e[0],'end':e[1]} for e in shortest]\n path_total.append(shortest) \n current_i += 1\n \n return None",
"def shortest_path(edges, start, end):\n # generate the graph\n graph = {}\n for edge in edges:\n s = edge['start']\n e = edge['end']\n if s in graph:\n graph[s].append(e)\n else:\n graph[s] = [e]\n # build up a queue for BFS\n path_total = []\n # append initial node to the queue\n# path_total.append([{'start':start}])\n path_total.append([start])\n # build up a set for recording if the node has been visited\n visited = set()\n # record the curent index\n current_i = 0\n while current_i < len(path_total):\n# print(path_total, visited)\n path = path_total[current_i] \n# current_pos = path[-1]['end'] if len(path[-1]) > 1 else path[-1]['start']\n current_pos = path[-1]\n if current_pos not in visited and current_pos in graph:\n visited.add(current_pos)\n edges_from_current = graph[current_pos]\n for source in edges_from_current:\n if source in visited:\n continue\n else:\n shortest = path.copy()\n# shortest.append({'start':current_pos, 'end':source})\n shortest.append(source)\n# path.append(source)\n # if reach the target node, return the path\n if source == end:\n# print(shortest)\n result = convert(shortest)\n# return shortest[1:]\n return result\n path_total.append(shortest)\n current_i += 1\n \n return None",
"def kpath(network, src, dst, k):\n assert k > 0\n assert network.has_node(src)\n assert network.has_node(dst)\n for a, b in network.edges:\n assert 'weight' in network[a][b]\n\n shortest_path = nx.dijkstra_path(network, src, dst)\n\n kpath = [tuple(shortest_path)]\n candidate_paths = {} # {path1: cost1, path2: cost2...}\n\n for i in range(k-1):\n # take the newly found path and use each node except dst as spur node\n path = kpath[i]\n for j in range(len(path) - 1):\n spur_node = path[j]\n root_path = list(path[:j+1])\n next_hops = find_next_hops_to_remove(kpath, root_path)\n # save the complete edge attributes so that they can be added later\n edges_saved = [(spur_node, b, network.edges[spur_node, b]) for b in next_hops]\n network.remove_edges_from(edges_saved)\n\n try:\n spur_path = nx.dijkstra_path(network, spur_node, dst)\n root_path.extend(spur_path[1:])\n candidate_paths[tuple(root_path)] = path_cost(network, root_path)\n except nx.NetworkXNoPath:\n # no spur path found\n pass\n\n # restore the previously removed edges\n network.add_edges_from(edges_saved)\n\n if candidate_paths:\n # candidate is not empty\n min_cost = -1\n new_path = None\n for p, cost in candidate_paths.items():\n if min_cost < 0 or min_cost > cost:\n new_path = p\n min_cost = cost\n\n # move new_path to kpath\n kpath.append(new_path)\n candidate_paths.pop(new_path)\n else:\n break\n\n return kpath",
"def spanning_edges(G, start):\n visited = set()\n todo = [(start, None)]\n span = set()\n\n while todo:\n (cur, e) = todo.pop()\n\n if cur in visited: continue\n\n visited.add(cur)\n if e: span.add(e)\n\n for n in G.adj_to(cur):\n if n not in visited:\n todo.append((n, (cur, n)))\n\n return span",
"def shortest_path(start, end):\n\n moves = rubik.quarter_twists\n\n parentS = {}\n parentE = {}\n parentS[start] = None\n parentE[end] = None\n\n start_current_positions = set()\n end_current_positions = set()\n start_current_positions.add(start)\n end_current_positions.add(end)\n\n if end in parentS:\n return get_moves(parentS, parentE, end)\n\n for i in range(7):\n start_next_positions = set()\n for position in start_current_positions:\n for move in moves:\n next_position = rubik.perm_apply(move, position)\n if next_position not in parentS:\n parentS[next_position] = (position, move)\n start_next_positions.add(next_position)\n if next_position in parentE:\n return get_moves(parentS,\n parentE,\n next_position)\n \n start_current_positions = start_next_positions\n end_next_positions = set()\n for position in end_current_positions:\n for move in moves:\n next_position = rubik.perm_apply(move, position)\n if next_position not in parentE:\n parentE[next_position] = (position, move)\n end_next_positions.add(next_position)\n if next_position in parentS:\n return get_moves(parentS,\n parentE,\n next_position)\n\n end_current_positions = end_next_positions\n\n return None",
"def kargermincut(graph):\n for i in range(len(graph) - 2):\n # randomly pick one edge\n start = random.choice(list(graph.keys()))\n end = random.choice(graph[start])\n # merge end node into start node\n for j in set(graph[end]):\n graph[j] = [n if n != end else start for n in graph[j]]\n # note that extend is in-place modification operation, return None\n graph[start].extend(graph[end])\n graph[start] = [n for n in graph[start] if n != start]\n del graph[end]\n return len(list(graph.values())[0])",
"def shortestReach(n, m, edges, s):\n\n nodes = [i for i in range(1, n + 1)]\n graph = AdjacencyList(nodes, edges).graph\n visited = {i : False for i in range(1, n + 1)}\n distances = {i : -1 for i in range(1, n + 1)}\n \n print(graph)\n\n root_level = [s]\n q = [root_level]\n curr_dist = 0\n while q:\n print(visited)\n level = q[0]\n del q[0]\n next_level = []\n for node in level:\n if not visited[node]:\n next_level += graph[node]\n visited[node] = True\n distances[node] = curr_dist\n if next_level:\n q.append(next_level)\n curr_dist += 6\n\n dists = list(distances.values())\n return dists[:s - 1] + dists[s : ]",
"def dijkstra1(self, game, graph, start, player):\n graph = {key: value for (key, value) in graph.items()} # Create a new dict to avoid the orignal one be replaced\n shortest_distance = {} # In the following 18 line of codes, which are derived and adjused from the Ian Sullivan(2017)(start) \n unseenNodes = graph # the code source: Implementation of dijkstra in python https://www.youtube.com/watch?v=IG1QioWSXRI&t=1s\n inf = 5000 \n size_board = game.size\n\n for node in unseenNodes:\n shortest_distance[node] = inf\n shortest_distance[start] = 0\n while unseenNodes:\n minNode = -10\n for node in unseenNodes:\n if minNode == -10:\n minNode = node\n elif shortest_distance[node] < shortest_distance[minNode]:\n minNode = node\n\n for childNode, distance in graph[minNode].items():\n if distance + shortest_distance[minNode] < shortest_distance[childNode]:\n shortest_distance[childNode] = distance + shortest_distance[minNode]\n\n unseenNodes.pop(minNode) # In the upper 18 line of codes, which are derived and adjused from the Ian Sullivan(2017)(end)\n\n # In the below, all codes is to identify the smallest distnace for red/blue pieces to the two side border\n if player == HexBoard.RED: # red is vertical\n edgeupper1 = []\n edgelower2 = []\n\n for i in range(size_board):\n a_edge1 = (i, 0)\n a_edge2 = (i, size_board - 1)\n edgeupper1.append(a_edge1)\n edgelower2.append(a_edge2)\n else: # blue is horizontal\n edgeupper1 = []\n edgelower2 = []\n\n for i in range(size_board):\n a_edge1 = (0, i)\n a_edge2 = (size_board - 1, i)\n edgeupper1.append(a_edge1)\n edgelower2.append(a_edge2)\n target_upper = inf\n for candidate in edgeupper1:\n if shortest_distance[candidate] < target_upper:\n target_upper = shortest_distance[candidate]\n target_lower = inf\n for candidate2 in edgelower2:\n if shortest_distance[candidate2] < target_lower:\n target_lower = shortest_distance[candidate2]\n return target_lower + target_upper",
"def dijkstra(graph, start, end):\n\n\n #init S ensemble with start_node inside\n S = [start]\n #defin V ensemble with all node of graph\n V = [x for x in range(len(graph))]\n #init distance dictionnary\n distance = {}\n #init previous history dictionnary\n previous = {}\n\n #init all of node distances to inf exept for start node\n for v in V:\n if v != start:\n distance[v] = inf\n\n #loop until S != V\n while len(S) != len(V):\n #for all element of V exept for the element which are in S\n for v in (set(V)-set(S)):\n #init uc as the last element added in S\n uc = S[-1]\n\n #if uc == 0 that signified we are in the start node\n if uc == 0:\n\n #add set uc as previous[v] if the new distance if shortest than the current\n if 0+graph[uc][v] < distance[v]:\n previous[v] = uc\n\n #set the v distance as the min beetween the current v distance and the edge of uc and v.\n distance[v] = min(distance[v], 0+graph[uc][v])\n\n else:\n #add set uc as previous[v] if the new distance if shortest than the current\n if distance[uc]+graph[uc][v] <distance[v]:\n previous[v] = uc\n #set the v distance as the min beetween the current v distance and the distance of u + the edge of uc and v.\n distance[v] = min(distance[v], distance[uc]+graph[uc][v])\n\n #find the node with the shortest distance\n #init vmin as inf\n vmin = inf\n x = inf\n #loop for all v in V / S\n for v in (set(V)-set(S)):\n #if v distance < vmin\n if distance[v] < vmin:\n vmin = distance[v]\n # x = the node with the shortest distance\n x = v\n\n\n # UPDATE STATEMENT\n # define new uc as x\n uc = x\n # add new uc to S\n S.append(uc)\n\n #define total_cost to cost of the ending distance\n total_cost= distance[end]\n #init shortest path\n path = []\n\n #loop to insert in path the previous node from end's node\n while(end != start):\n path.insert(0, end)\n end = previous[end]\n path.insert(0, start)\n\n #return the shortest_way and total cost of dijkstra from start to end\n return path, total_cost",
"def numShortestPaths(g, start, end):\n\n for node in [start, end]:\n # raises exception when any of the inputs are None\n if g is None or node is None:\n raise InvalidInputException(\"Invlaid input\")\n # raises exception when start or end nodes are not in g.\n if g.containsVertex(node) == False:\n \traise InvalidInputException(\"Invalid input\")\n\n for v in g.vertices():\n # sets distance of all vertices to a large value.\n v.distance = float('inf') \n\n start.distance = 0 # number of nodes away from start node.\n start.paths = 1 # number of paths there are from start node.\n\n order = []\n order.append(start)\n\n while order is not None:\n # BFS\n visited = order.pop(0)\n\n for edge in g.incidentEdges(visited):\n adjacent = g.opposite(visited, edge)\n order.append(adjacent)\n g.removeEdge(edge)\n\n if(adjacent.distance > visited.distance +1):\n # updates adjacent node's distance\n adjacent.distance = visited.distance +1\n # updates adjacent node's number of paths\n adjacent.paths = visited.paths\n\n elif(adjacent.distance == visited.distance +1):\n adjacent.paths += visited.paths\n\n # when end node is visited, returns the number of paths \n # stored in the end node.\n if(visited == end):\n return visited.paths",
"def traverse_graph(G, start, nextn):\n start_coords = array([G.node[start]['x'], G.node[start]['y']])\n nodes_visited = [start]\n nodes_visited_set = set()\n edges_visited = []\n coords = [start_coords]\n\n prev = start\n cur = nextn\n\n while cur != start:\n cur_coords = array([G.node[cur]['x'], G.node[cur]['y']])\n # We ignore all neighbors we alreay visited to avoid multiple loops\n\n neighs = [n for n in G.neighbors(cur) if n != prev and n != cur]\n\n edges_visited.append((prev, cur))\n nodes_visited.append(cur)\n coords.append(cur_coords)\n\n n_neighs = len(neighs)\n if n_neighs > 1:\n # Choose path that keeps the loop closest on the left hand side\n prev_coords = array([G.node[prev]['x'], G.node[prev]['y']])\n neigh_coords = array([[G.node[n]['x'], G.node[n]['y']] \\\n for n in neighs])\n\n ## Construct vectors and normalize\n u = cur_coords - prev_coords\n vs = neigh_coords - cur_coords\n\n # calculate cos and sin between direction vector and neighbors\n u /= sqrt((u*u).sum(-1))\n vs /= sqrt((vs*vs).sum(-1))[...,newaxis]\n\n coss = dot(u, vs.T)\n sins = cross(u, vs)\n\n # this is a function between -2 and +2, where the\n # leftmost path corresponds to -2, rightmost to +2\n # sgn(alpha)(cos(alpha) - 1)\n ranked = sign(sins)*(coss - 1.)\n\n prev = cur\n cur = neighs[argmin(ranked)]\n else:\n # No choice to make\n prev = cur\n cur = neighs[0]\n\n # Remove pathological protruding loops\n if prev in nodes_visited_set:\n n_ind = nodes_visited.index(prev)\n\n del nodes_visited[n_ind+1:]\n del coords[n_ind+1:]\n del edges_visited[n_ind:]\n\n nodes_visited_set.add(prev)\n\n edges_visited.append((nodes_visited[-1], nodes_visited[0]))\n\n return nodes_visited, edges_visited, array(coords)",
"def _compute_all_shortest_paths(graph, source, target, exclude_edge=False):",
"def graph_search(start, end, length, weights, config):\r\n\r\n if max(np.abs(start[0]-end[0]), np.abs(start[1]-end[1])) > length:\r\n raise Exception('length {0} too short to plan path from start {1} to end {2}'.format(length, start, end))\r\n\r\n graph = nx.DiGraph()\r\n nodes = [(start, length)]\r\n\r\n # for each node, find other nodes that can be moved to with the remaining amount of path length\r\n while nodes:\r\n current_node, current_length = nodes.pop(0)\r\n if current_length == 0:\r\n continue\r\n\r\n for (dr, dc) in config.movements:\r\n neighbor_node = (current_node[0] + dr, current_node[1] + dc)\r\n\r\n if max(abs(current_node[0]+dr-end[0]), abs(current_node[1]+dc-end[1])) >= current_length:\r\n continue\r\n\r\n neighbor = (neighbor_node, int(current_length-1))\r\n edge = ((current_node, current_length), neighbor)\r\n if graph.has_edge(edge[0], edge[1]):\r\n continue\r\n\r\n if 0 <= neighbor_node[0] < config.dimension and 0 <= neighbor_node[1] < config.dimension:\r\n nodes.append(neighbor)\r\n graph.add_edge(edge[0], edge[1], weight=1e-4+weights[neighbor_node[0], neighbor_node[1]])\r\n\r\n if len(graph.edges()) == 1:\r\n return [start, end], weights[end[0], end[1]]\r\n\r\n path = nx.algorithms.dag_longest_path(graph)\r\n path_weight = sum([graph.get_edge_data(path[i], path[i+1])['weight'] for i in range(len(path)-1)])\r\n path = [element[0] for element in path]\r\n\r\n return path, path_weight",
"def k_shortest_paths(G, source, target, k=1, weight='weight'):\n if source == target:\n return ([0], [[source]])\n\n length, path = nx.single_source_dijkstra(G, source, weight=weight)\n# print(length,path)\n if target not in length:\n print(\"node %s not reachable from %s\" % (target, source))\n return [],[]\n # raise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n\n lengths = [length[target]]\n paths = [path[target]]\n c = count()\n B = []\n G_original = G.copy()\n\n for i in range(1, k):\n for j in range(len(paths[-1]) - 1):\n spur_node = paths[-1][j]\n root_path = paths[-1][:j + 1]\n\n edges_removed = []\n for c_path in paths:\n if len(c_path) > j and root_path == c_path[:j + 1]:\n u = c_path[j]\n v = c_path[j + 1]\n if G.has_edge(u, v):\n edge_attr = G.edges[u,v]\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n for n in range(len(root_path) - 1):\n node = root_path[n]\n for u in G.nodes:\n if [u,node] in G.edges:\n edge_attr = G.edges[u,node]\n G.remove_edge(u, node)\n edges_removed.append((u, node, edge_attr))\n if [node,u] in G.edges:\n edge_attr = G.edges[node,u]\n G.remove_edge(node,u)\n edges_removed.append((node,u, edge_attr))\n spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, weight=weight)\n if target in spur_path and spur_path[target]:\n total_path = root_path[:-1] + spur_path[target]\n total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]\n heappush(B, (total_path_length, next(c), total_path))\n\n for e in edges_removed:\n u, v, edge_attr = e\n if weight:\n G.add_edge(u, v, weight = edge_attr[weight])\n\n if B:\n (l, _, p) = heappop(B)\n lengths.append(l)\n paths.append(p)\n else:\n break\n G = G_original\n\n return (lengths, paths)",
"def findPathsDown(graph={}, startnode='', operator='==', pathlength=None): \r\n pathsDownGraph ={}\r\n # Define operators that can be used for pathlength comparisons\r\n operatorDict = {'<': '<', '<=': '<=',\r\n '>' : '>', '>=': '>=',\r\n '==': '==', '!=': '!=',\r\n '<>' : '<>'}\r\n # Check that only valid operators are input by user\r\n if not operatorDict.has_key(operator):\r\n return \"Operator must be one of -- '<', '<=', '>', '>=', '==', '!=', '<>'\"\r\n if pathlength <> None:\r\n # Check that pathlength is an integer\r\n try:\r\n int(pathlength)\r\n except ValueError:\r\n return \"Pathlength must be an integer\"\r\n # Create a list of unique endnodes (for iteration and will become keys in output graph)\r\n endnodesList = graph.values()\r\n uniqueendnodesList = [ ]\r\n for nodeList in endnodesList:\r\n for node in nodeList:\r\n if node not in uniqueendnodesList:\r\n uniqueendnodesList.append(node)\r\n # For each unique endnode find all paths between the startnode and endnodes\r\n for endnode in uniqueendnodesList:\r\n # Ignore case where endnode == startnode -- path to self\r\n if endnode == startnode:\r\n continue\r\n pathsList = findAllPathsAsLists(graph, startnode, endnode)\r\n # Check that resulting list of paths is not empty\r\n if len (pathsList) <> 0:\r\n # If pathlength == None -- no Filter is applied and path entered\r\n # in output graph for key(startnode)\r\n if pathlength == None:\r\n pathsDownGraph[endnode] = pathsList\r\n else:\r\n # apply the user input operator and pathlength info to filter paths\r\n validpathsList = [path for path in\r\n pathsList if\r\n eval('len(path)' + operatorDict[operator] + 'pathlength')]\r\n # Check that remaining valid paths results in a non-empty list.\r\n if len(validpathsList) > 0:\r\n # Apply valid non-empty paths to output graph for key (startnode)\r\n pathsDownGraph[endnode] = validpathsList\r\n else:\r\n continue\r\n else:\r\n continue\r\n return pathsDownGraph",
"def opt_delivery(n:int,roads:list,start:int,end:int,delivery:tuple) -> tuple:\r\n my_graph = Graph(n)\r\n my_graph.add_edges(roads) #create graph and add edges for all vertices\r\n pickup_city,delivery_city,profit = delivery\r\n delivery_path = (delivery[0],delivery[1])\r\n\r\n\r\n start_end = my_graph.dijkstra(start,end,delivery_path) #run dijkstra from start city to end city, reset vertices afterwards\r\n reset(my_graph)\r\n start_pickup = my_graph.dijkstra(start,pickup_city,delivery_path) #run dijkstra from start city to pickup city, reset vertices afterwards\r\n reset(my_graph)\r\n pickup_delivery = my_graph.dijkstra(pickup_city,delivery_city,delivery_path) #run dijkstra from pickup city to delivery city, reset vertices afterwards\r\n reset(my_graph)\r\n delivery_end = my_graph.dijkstra(delivery_city,end,delivery_path) #run dijkstra from delivery city to end city\r\n\r\n delivery_profit = start_pickup[0] + pickup_delivery[0] + delivery_end[0] - profit #as delivery path will be used, subtract profit from total cost\r\n\r\n if start_end[2]:\r\n start_end[0] -= profit #check if delivery path used in start city to end city path\r\n\r\n if start_end[0] <= delivery_profit:\r\n return (start_end[0], [start_end[1][i] for i in range(len(start_end[1])-1,-1,-1)]) #reconstruct path as our path is stored backwards\r\n\r\n else:\r\n path = []\r\n for i in range(len(start_pickup[1])-1,-1,-1):\r\n path.append(start_pickup[1][i])\r\n\r\n for j in range(len(pickup_delivery[1])-2,-1,-1): #reconstruct path as our path is stored backwards\r\n path.append(pickup_delivery[1][j])\r\n \r\n for l in range(len(delivery_end[1])-2,-1,-1):\r\n path.append(delivery_end[1][l])\r\n return (delivery_profit,path)",
"def dijkstra(self,source:int,end:int,delivery_path:tuple) -> tuple:\r\n delivery_path_used = False\r\n self.vertices[source].cost = 0\r\n discovered = MinHeap(self.vertices) #create MinHeap and add all vertices into MinHeap\r\n discovered.rise(discovered.indx[source]) #rise the source vertex\r\n while (len(discovered)) > 0 :\r\n u = discovered.serve() \r\n\r\n if u.id == end: #reached our end, terminate early\r\n path,delivery_path_used = self.get_path(source,end,delivery_path) #backtrack to get path \r\n return (u.cost,path,delivery_path_used)\r\n\r\n u.visited = True\r\n for edge in u.edges: #edge relaxation\r\n v = self.vertices[edge.v]\r\n if v.discovered == False:\r\n v.discovered = True\r\n v.cost = u.cost + edge.w\r\n v.previous = u\r\n position = discovered.indx[v.id] #calculate positon of vertex v in heap\r\n discovered.rise(position)\r\n elif v.visited == False:\r\n if v.cost > u.cost + edge.w:\r\n v.cost = u.cost + edge.w\r\n v.previous = u\r\n position = discovered.indx[v.id] #calculate positon of vertex v in heap\r\n discovered.rise(position) \r\n \r\n path,delivery_path_used = self.get_path(source,end,delivery_path) #backtrack to get path\r\n return (self.vertices[end].cost, path, delivery_path_used)",
"def nodes_and_edges_to_path(nodes, edges):\n out = []\n for i in range(len(nodes)-1):\n out.append(nodes[i])\n out.append(edges[i])\n else:\n out.append(nodes[-1])\n return out",
"def least_cost_path(graph, start, dest, cost):\n distance = 0\n reached = dict()\n runners = [(distance, start, start)]\n heapq.heapify(runners)\n\n while len(runners) != 0:\n current = heapq.heappop(runners)\n\n if current[1] in reached.keys():\n continue\n reached[current[1]] = current[2]\n for v in graph.neighbours(current[1]):\n if v in reached.keys():\n continue\n else:\n heapq.heappush(\n runners, (current[0] + cost(current[1], v), v, current[1]))\n\n path = list()\n at = dest\n if dest not in reached.keys():\n return path\n else:\n while at != start:\n path.append(at)\n at = reached[at]\n path.append(reached[at])\n return path[::-1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
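The shortest_path_k_lefts document above relies on a direction(v_current, v_edge) helper that is not part of the snippet. The sketch below is one plausible implementation, assuming standard x-right/y-up coordinates and that the helper classifies the turn between two 2-D direction vectors; only the labels 'left' and 'U turn' are confirmed by the snippet, while 'right' and 'straight' are assumptions.

def direction(v1, v2):
    # Sketch only: classify the turn from vector v1 to vector v2 using the
    # z-component of the 2-D cross product and the dot product. The sign
    # convention assumes x grows to the right and y grows upward.
    cross = v1[0] * v2[1] - v1[1] * v2[0]
    dot = v1[0] * v2[0] + v1[1] * v2[1]
    if cross > 0:
        return 'left'
    if cross < 0:
        return 'right'
    # collinear vectors: same heading is straight ahead, opposite heading is a U turn
    return 'straight' if dot > 0 else 'U turn'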
In entries, find one that matches the name. Name can be prefixed with scope, in the format of "scope.name". There must be only one matching entry. | def _find_entry(self, entries, name):
scope, _, name = name.rpartition('.')
matching_entries = [
entry for entry in entries
if entry.name == name and
(not scope or entry.scope == scope)]
if not matching_entries:
raise NotFoundError(name)
if len(matching_entries) != 1:
raise AmbiguousName(
'Expected 1 value, but found {0}'
.format(len(matching_entries)))
return matching_entries[0] | [
"def ls_entry_by_name(entries, name):\n return next(e for e in entries if os.path.basename(os.path.normpath(e['path'])) == name)",
"def find_by_name(self, name_prefix):\n for project in self:\n if project['name'].startswith(name_prefix):\n return project\n return None",
"def _LookupScopedName(self, name: str) -> Optional[pytd.Node]:\n scopes = [self.unit.name]\n prefix = f\"{self.unit.name}.\"\n if self.class_names and not self.class_names[0].startswith(prefix):\n # For imported modules, the class names are already prefixed with the\n # module name. But for the inferred type stub for the current module, the\n # class names are bare, so we add the prefix here.\n scopes.extend(prefix + name for name in self.class_names)\n else:\n scopes.extend(self.class_names)\n for inner in scopes:\n lookup_name = f\"{inner}.{name}\"[len(prefix):]\n try:\n return self._LookupItemRecursive(lookup_name)\n except KeyError:\n pass\n return None",
"def get_entry_from_name(self, name: Name) -> ThunkTableEntry:\n for e in self.container:\n if e.name == name:\n return e",
"def find(self, _name):\n for c in self.__items:\n if c.name == _name:\n return c\n raise RepositoryException(\"Found no contacts with name : \" + _name)",
"def by_name_prefix(self, nameprefix):\n nameprefix = self.full_name(nameprefix)\n matches = [b\n for b in self.iter()\n if b.name.startswith(nameprefix)]\n num_matches = len(matches)\n if num_matches == 1:\n return matches[0]\n elif num_matches < 1:\n raise NoSuchBranchError(\n 'There is no %s branch matching the '\n 'prefix \"%s\"' % (self.identifier, nameprefix))\n else:\n raise PrefixNotUniqueError(\n 'There are multiple %s branches '\n 'matching the prefix \"%s\": %s' % (self.identifier, nameprefix, matches))",
"def get_item_by_name(self, partialname):\n for item in self.items:\n itemobj=globalitemloader.get_item(item)\n if partialname.lower() in itemobj.name.lower():\n return itemobj\n return None",
"def find_by_name(countries, name):\n for country in countries:\n if country.name == name:\n return country",
"def get_entry_starting_by(self, entry_name):\n entries = self.get_all_entries()\n results = []\n for e in entries:\n if e.name.startswith(entry_name):\n results.append(e)\n\n if not results:\n info_msg(\"No corresponding name found 😕\")\n return results",
"def find_by_name(self, name):\n name = str(name)\n for locator in self._locators:\n if locator[\"name\"] == name:\n return locator\n return None",
"def find(self, line):\r\n for anEntry in self.db.get_ncf_entries():\r\n if anEntry.isMatch(line):\r\n return anEntry\r\n \r\n return None",
"def lookup(self, name: str) -> Strain:\n if name in self:\n return self._strain_dict_name[name]\n raise KeyError(f\"Strain {name} not found in strain collection.\")",
"def entity_named(name):\n\tfor i in game_entities:\n\t\tif game_entities[i] == name:\n\t\t\treturn game_entities[i]",
"def get_person_by_name(self, name):\n print(name)\n urlpath = '/people?where={{\"name\":\"{}\"}}'.format(name)\n resd = self.getdict(urlpath)\n res = resd['_items']\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n print('Not Found')\n return None\n else:\n print('Found multiple', len(res))\n return res",
"def get_recipe_by_name(self, name):\n for _, recipe in self.recipe_list.items():\n if recipe.name == name:\n print(recipe)\n return recipe",
"def find_by_name_like(cls, value):\n # return the first row matching with the filter using FlaskSQLAlchemy\n return cls.query.filter(cls.name.like(value + '%')).all()",
"def match(self, query: str) -> HPOTerm:\n\n for term in self:\n if query == term.name:\n return term\n raise RuntimeError(\"No HPO entry with name {}\".format(query))",
"def expand_scope_by_name(scope, name):\n\n if isinstance(scope, basestring):\n scope += '/' + name\n return scope\n\n if scope is not None:\n return scope.name + '/' + name\n else:\n return scope",
"def expand_scope_by_name(scope, name):\n\n if isinstance(scope, string_types):\n scope += '/' + name\n return scope\n\n if scope is not None:\n return scope.name + '/' + name\n else:\n return scope",
"def find_node(self, name):\n for node in self._nodes:\n if node.name == name:\n return node\n self._lexer.raise_KeyError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
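The _find_entry document above expects entries exposing .name and .scope attributes and raises NotFoundError and AmbiguousName, none of which are defined in the snippet. The sketch below is a minimal, self-contained illustration of those assumptions; the Entry shape, the exception classes, and the finder object in the commented calls are hypothetical.

from collections import namedtuple

# Hypothetical stand-ins for the types the snippet refers to but does not define.
Entry = namedtuple('Entry', ['scope', 'name'])

class NotFoundError(Exception):
    pass

class AmbiguousName(Exception):
    pass

entries = [Entry('db', 'host'), Entry('cache', 'host'), Entry('db', 'port')]

# Expected behaviour of the method above, given these entries:
#   finder._find_entry(entries, 'db.host')  -> Entry(scope='db', name='host')
#   finder._find_entry(entries, 'port')     -> Entry(scope='db', name='port')  (bare name, single match)
#   finder._find_entry(entries, 'host')     -> raises AmbiguousName (two entries named 'host')
#   finder._find_entry(entries, 'missing')  -> raises NotFoundError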
Test matrix square root | def test_sqrtm():
C = 2*np.eye(3)
Ctrue = np.sqrt(2)*np.eye(3)
assert_array_almost_equal(sqrtm(C), Ctrue) | [
"def test_sqrtm(self):\n\n matrices = jax.random.normal(self.rng, (self.batch, self.dim, 2 * self.dim))\n\n for x in (matrices, matrices[0, :, :]): # try with many and only one.\n x = jnp.matmul(x, jnp.swapaxes(x, -1, -2))\n threshold = 1e-4\n\n sqrt_x, inv_sqrt_x, errors = matrix_square_root.sqrtm(\n x, min_iterations=self.dim, threshold=threshold\n )\n err = errors[errors > -1][-1]\n assert threshold > err\n np.testing.assert_allclose(\n x, jnp.matmul(sqrt_x, sqrt_x), rtol=1e-3, atol=1e-3\n )\n ids = jnp.eye(self.dim)\n if jnp.ndim(x) == 3:\n ids = ids[jnp.newaxis, :, :]\n np.testing.assert_allclose(\n jnp.zeros_like(x),\n jnp.matmul(x, jnp.matmul(inv_sqrt_x, inv_sqrt_x)) - ids,\n atol=1e-2\n )",
"def test_square_root():\n # Print table header\n print(\"a\".ljust(5) + \"mysqrt(a)\".ljust(15) + \"math.sqrt(a)\".ljust(15) + \"diff\".ljust(4) + \"\\n\" + \" \".rjust(4, \"-\") + \" \".rjust(15, \"-\") + \" \".rjust(15, \"-\") + \"----\")\n # begin looping through the integers\n for i in range(1, 10, 1):\n my_square_root = mysqrt(i)\n math_square_root = math.sqrt(i)\n diff = my_square_root - math_square_root\n print(str(float(i)).ljust(5) + str(round(my_square_root, 10)).ljust(15) + str(round(math_square_root, 10)).ljust(15) + str(diff))",
"def testSqrtValue(self):\n\n self.assertEqual(sqrtValue(9), 3)",
"def sqrt(x):\n z = np.sqrt(x)\n return z",
"def test_regular():\n assert newton.square_root_with_newton_method(25, 1) == 7.25",
"def sqrt (x):\n if x > 0:\n return np.sqrt(x)\n else :\n return 0;",
"def test_small_iteration():\n assert newton.square_root_with_newton_method(10, 0.5) == 5.0",
"def test_iteration_zero():\n assert newton.square_root_with_newton_method(25, 0) == 12.5",
"def test_sqrt2():\n from math import sqrt\n avalues = [0.25, 3.45,23.5,13342534.1]\n tol = 1.e-14\n for a in avalues:\n s = sqrt(a)\n s2 = sqrt2(a)\n delta_s = abs(s-s2)\n assert delta_s<tol, \"error, test failed with a=%f\" %(a)\n print(\"all tests passed\")",
"def sqrtmat(\n a, b,\n c, d,\n module=sy\n):\n τ = a + d\n δ = a * d - b * c\n s = module.sqrt(δ)\n t = module.sqrt(τ + 2 * s)\n a, b, c, d = (a + s), b, c, (d + s)\n a, b, c, d = a / t, b / t, c / t, d / t\n return a, b, c, d",
"def test_round_twice():\n assert newton.square_root_with_newton_method(25, 2) == 5.349",
"def test_trace_sqrt_product_value(self):\n np.random.seed(0)\n\n # Make num_examples > num_features to ensure scipy's sqrtm function\n # doesn't return a complex matrix.\n test_pool_real_a = np.float32(np.random.randn(512, 256))\n test_pool_gen_a = np.float32(np.random.randn(512, 256))\n\n cov_real = np.cov(test_pool_real_a, rowvar=False)\n cov_gen = np.cov(test_pool_gen_a, rowvar=False)\n\n trace_sqrt_prod_op = _run_with_mock(gan_metrics.trace_sqrt_product, cov_real, cov_gen)\n\n with self.test_session() as sess:\n # trace_sqrt_product: tsp\n actual_tsp = sess.run(trace_sqrt_prod_op)\n\n expected_tsp = _expected_trace_sqrt_product(cov_real, cov_gen)\n\n self.assertAllClose(actual_tsp, expected_tsp, 0.01)",
"def test_sqrt(val: Val, expected: Val):\n assert dataclasses.astuple(val.sqrt()) == pytest.approx(dataclasses.astuple(expected))",
"def test_matrix_45(self):\r\n m = matrix([[0.707106781186548, -0.707106781186547, 0], [0.707106781186547, 0.707106781186548, 0], [0, 0, 1]])\r\n self.__check(45, m)",
"def test_high_numbers():\n assert newton.square_root_with_newton_method(12335435, 20) == 3512.184",
"def squareRoot(self, num):\n num = float (num)\n from math import sqrt\n result = sqrt(num)\n return result",
"def test(self, data): \n sum_x = 0.0\n sum_y = 0.0\n sum_x_squared = 0.0\n sum_y_squared = 0.0\n sum_xy = 0.0\n for i in range(len(data)):\n sum_x += data[i][0]\n sum_y += data[i][1]\n sum_xy += data[i][0] * data[i][1]\n sum_x_squared += data[i][0] * data[i][0]\n sum_y_squared += data[i][1] * data[i][1]\n\n r = float(len(data)) * sum_xy - sum_x * sum_y\n r /= math.sqrt(len(data) * sum_x_squared - sum_x * sum_x) * math.sqrt(len(data) * sum_y_squared - sum_y * sum_y)\n print \"r value: \" + str(r)\n\n return r",
"def test_matrix_90(self):\r\n m = matrix([[6.12303176911189E-17, -1, 0], [1, 6.12303176911189E-17, 0], [0, 0, 1]])\r\n self.__check(90, m)",
"def test_arcsine_sqrt_transform(self):\n self.result1 = bc.relative_abundance(self.biomf)\n self.result2 = bc.arcsine_sqrt_transform(self.result1)\n\n # Obtaining results to compare.\n hand_calc = [0, 0.32175055439, 0.463647609, 0.57963974036, 0.684719203]\n func_calc = self.result2.values()[3].values()\n\n # Testing validity of the transforms.\n for hand, func in zip(hand_calc, func_calc):\n self.assertAlmostEqual(\n hand, func, places=7,\n msg=\"Function did not calculate transformation accurately.\"\n )",
"def is_square(block): \n \n return is_matrix(block) and (len(block[0]) == len(block))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
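The test_sqrtm document above exercises a sqrtm() (and numpy.testing's assert_array_almost_equal) that is not shown. The sketch below is one plausible implementation for symmetric positive semi-definite matrices, via eigendecomposition; the function the test actually targets could equally be scipy.linalg.sqrtm or something else entirely.

import numpy as np

def sqrtm(C):
    # Sketch only: matrix square root of a symmetric positive semi-definite matrix.
    # eigh is used because C is assumed symmetric; clipping guards against tiny
    # negative eigenvalues introduced by floating-point error.
    vals, vecs = np.linalg.eigh(C)
    vals = np.clip(vals, 0.0, None)
    return (vecs * np.sqrt(vals)) @ vecs.T

# For C = 2 * I every eigenvalue is 2, so sqrtm(C) equals sqrt(2) * I,
# which is exactly what test_sqrtm() asserts.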
Creates Lambda deployment package | def build_lambda():
try:
os.system("mkdir -p ./build")
os.system("cp -r ./lambda ./build")
os.system("pip3 install -r ./build/lambda/requirements.txt -t ./build/lambda")
shutil.make_archive("./build/lambda", 'zip', "./build/lambda")
os.system("rm -rf ./build/lambda")
print("Lambda deployment package built!")
except Exception as e:
print(f"Error building deployment package. Exception: {e}.") | [
"def _generate_lambda(appName, _lambda, roleARN, config, projPath):\n\n if( not os.path.exists(projPath+'/.tmp') ):\n os.mkdir(projPath+'/.tmp')\n\n if( not os.path.isfile(projPath+'/.tmp/dist.zip') ):\n AWSSetup._compress_app_package(\n projPath+'/.tmp/dist',\n projPath+'/.tmp/dist.zip',\n ['.git/']\n )\n\n funcName = appName+'-uxy-app-'+config['app:stage']\n zipFile = open(projPath+'/.tmp/dist.zip', 'rb')\n zipFileBin = zipFile.read()\n zipFile.close()\n\n statusCode = AWSSetup._function_exists(funcName, _lambda)\n if( statusCode == AWSSetup.FUNCTION_NOT_FOUND ):\n runtime = None\n if( config['app:runtime'] == 'go' ):\n runtime = 'go1.x'\n if( config['app:runtime'] == 'python' ):\n runtime = 'python3.9'\n\n AWSSetup._log(\"+ Creating lambda function...\")\n AWSSetup._log(\"+ Runtime: \"+runtime)\n response = _lambda.create_function(\n FunctionName = funcName,\n Runtime = runtime,\n Role = roleARN,\n Handler = config['aws:config']['lambda:handler'],\n Code = {\n 'ZipFile' : zipFileBin\n },\n Timeout = config['aws:config']['lambda:timeout']\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n elif ( statusCode == AWSSetup.FUNCTION_FOUND ):\n AWSSetup._log('+ Updating lambda function...')\n response = _lambda.update_function_code(\n FunctionName = funcName,\n ZipFile = zipFileBin\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n else:\n AWSSetup._log('=> ERROR: error getting lambda function')\n response = {}\n\n\n return response",
"def upload_lambda():\n\n s3 = session.resource('s3')\n\n try:\n s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\").upload_file('./build/lambda.zip', 'lambda.zip')\n print(\"Lambda deployment package uploaded to S3!\")\n\n except Exception as e:\n print(f\"Error uploading deployment package. Exception: {e}.\")",
"def create_lambda_zip(self, prefix='lambda_package', handler_file=None,\n minify=True, exclude=None, use_precompiled_packages=True, include=None, venv=None):\n import pip\n\n print(\"Packaging project as zip...\")\n\n if not venv:\n if 'VIRTUAL_ENV' in os.environ:\n venv = os.environ['VIRTUAL_ENV']\n elif os.path.exists('.python-version'): # pragma: no cover\n logger.debug(\"Pyenv's local virtualenv detected.\")\n try:\n subprocess.check_output('pyenv', stderr=subprocess.STDOUT)\n except OSError:\n print(\"This directory seems to have pyenv's local venv\"\n \"but pyenv executable was not found.\")\n with open('.python-version', 'r') as f:\n env_name = f.read()[:-1]\n logger.debug('env name = {}'.format(env_name))\n bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')\n venv = bin_path[:bin_path.rfind(env_name)] + env_name\n logger.debug('env path = {}'.format(venv))\n else: # pragma: no cover\n print(\"Zappa requires an active virtual environment.\")\n quit()\n\n cwd = os.getcwd()\n zip_fname = prefix + '-' + str(int(time.time())) + '.zip'\n zip_path = os.path.join(cwd, zip_fname)\n\n # Files that should be excluded from the zip\n if exclude is None:\n exclude = list()\n\n # Exclude the zip itself\n exclude.append(zip_path)\n\n def splitpath(path):\n parts = []\n (path, tail) = os.path.split(path)\n while path and tail:\n parts.append(tail)\n (path, tail) = os.path.split(path)\n parts.append(os.path.join(path, tail))\n return map(os.path.normpath, parts)[::-1]\n split_venv = splitpath(venv)\n split_cwd = splitpath(cwd)\n\n # Ideally this should be avoided automatically,\n # but this serves as an okay stop-gap measure.\n if split_venv[-1] == split_cwd[-1]: # pragma: no cover\n print(\n \"Warning! Your project and virtualenv have the same name! 
You may want \"\n \"to re-create your venv with a new name, or explicitly define a \"\n \"'project_name', as this may cause errors.\"\n )\n\n # First, do the project..\n temp_project_path = os.path.join(tempfile.gettempdir(), str(int(time.time())))\n\n if minify:\n excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]\n copytree(cwd, temp_project_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(cwd, temp_project_path, symlinks=False)\n\n # Then, do the site-packages..\n temp_package_path = os.path.join(tempfile.gettempdir(), str(int(time.time() + 1)))\n if os.sys.platform == 'win32':\n site_packages = os.path.join(venv, 'Lib', 'site-packages')\n else:\n site_packages = os.path.join(venv, 'lib', 'python2.7', 'site-packages')\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages, temp_package_path, symlinks=False)\n\n # We may have 64-bin specific packages too.\n site_packages_64 = os.path.join(venv, 'lib64', 'python2.7', 'site-packages')\n if os.path.exists(site_packages_64):\n if minify:\n excludes = ZIP_EXCLUDES + exclude\n copytree(site_packages_64, temp_package_path, symlinks=False, ignore=shutil.ignore_patterns(*excludes))\n else:\n copytree(site_packages_64, temp_package_path, symlinks=False)\n\n copy_tree(temp_package_path, temp_project_path, update=True)\n\n # Then the pre-compiled packages..\n if use_precompiled_packages:\n installed_packages_name_set = {package.project_name.lower() for package in\n pip.get_installed_distributions()}\n\n for name, details in lambda_packages.items():\n if name.lower() in installed_packages_name_set:\n tar = tarfile.open(details['path'], mode=\"r:gz\")\n for member in tar.getmembers():\n # If we can, trash the local version.\n if member.isdir():\n shutil.rmtree(os.path.join(temp_project_path, member.name), ignore_errors=True)\n continue\n\n tar.extract(member, temp_project_path)\n\n # If a handler_file is supplied, copy that to the root of the package,\n # because that's where AWS Lambda looks for it. 
It can't be inside a package.\n if handler_file:\n filename = handler_file.split(os.sep)[-1]\n shutil.copy(handler_file, os.path.join(temp_project_path, filename))\n\n # Then zip it all up..\n try:\n # import zlib\n compression_method = zipfile.ZIP_DEFLATED\n except ImportError: # pragma: no cover\n compression_method = zipfile.ZIP_STORED\n\n zipf = zipfile.ZipFile(zip_path, 'w', compression_method)\n for root, dirs, files in os.walk(temp_project_path):\n\n for filename in files:\n\n # If there is a .pyc file in this package,\n # we can skip the python source code as we'll just\n # use the compiled bytecode anyway..\n if filename[-3:] == '.py':\n abs_filname = os.path.join(root, filename)\n abs_pyc_filename = abs_filname + 'c'\n if os.path.isfile(abs_pyc_filename):\n\n # but only if the pyc is older than the py,\n # otherwise we'll deploy outdated code!\n py_time = os.stat(abs_filname).st_mtime\n pyc_time = os.stat(abs_pyc_filename).st_mtime\n\n if pyc_time > py_time:\n continue\n\n zipf.write(os.path.join(root, filename), os.path.join(root.replace(temp_project_path, ''), filename))\n\n if '__init__.py' not in files:\n tmp_init = os.path.join(temp_project_path, '__init__.py')\n open(tmp_init, 'a').close()\n zipf.write(tmp_init, os.path.join(root.replace(temp_project_path, ''), os.path.join(root.replace(temp_project_path, ''), '__init__.py')))\n\n # And, we're done!\n zipf.close()\n\n # Trash the temp directory\n shutil.rmtree(temp_project_path)\n shutil.rmtree(temp_package_path)\n\n # Warn if this is too large for Lambda.\n file_stats = os.stat(zip_path)\n if file_stats.st_size > 52428800: # pragma: no cover\n print(\"\\n\\nWarning: Application zip package is likely to be too large for AWS Lambda.\\n\\n\")\n\n return zip_fname",
"def deploy_pypi():\n test()\n register_pypi()\n deploy_src()\n deploy_eggs()",
"def deploy(function_name=None, arn=None):\n if not function_name:\n abort('Must provide function_name')\n if not arn:\n abort('Must provide arn')\n\n lambda_root = os.path.join(LAMBDA_DIR, function_name)\n builds_dir = os.path.join(lambda_root, BUILDS_SUBDIR)\n builds = sorted(glob.glob(os.path.join(builds_dir, '*.{0}'.format('zip'))))\n if not builds:\n abort('No builds exist. Run a `build` command first')\n latest_build = builds[-1]\n logging.info('Preparing to deploy build: {0}'.format(latest_build))\n\n client = boto3.client('lambda')\n with open(latest_build, 'rb') as zip_file:\n response = client.update_function_code(FunctionName=arn, ZipFile=zip_file.read())\n logger.info(json.dumps(response, indent=2))",
"def package_deploy(ctx):\n ctx.run(\"twine upload dist/*\")",
"def build(function_name=None):\n if not function_name:\n abort('Must provide function_name')\n\n lambda_root = os.path.join(LAMBDA_DIR, function_name)\n module_dir = os.path.join(lambda_root, function_name)\n lambda_config_dir = os.path.join(lambda_root, LAMBDA_CONFIG_SUBDIR)\n staging_dir = os.path.join(lambda_root, STAGING_SUBDIR)\n builds_dir = os.path.join(lambda_root, BUILDS_SUBDIR)\n build_filename = '{0}-{1}.zip'.format(\n datetime.datetime.now().isoformat().replace(':', '.'), function_name)\n\n # Erase previous runs of the build task.\n local('rm -rf {0}'.format(staging_dir))\n\n # Set up staging and builds directories.\n local('mkdir -p {0}'.format(staging_dir))\n local('mkdir -p {0}'.format(builds_dir))\n\n # Install the lambda specific requirements.\n local('pip install -r {0}/requirements.txt -t {1}'.format(lambda_root, staging_dir))\n\n # Copy the top level *.py (e.g. index.py) and lambda_config dir into the staging_dir.\n local('cp -R {0}/*.py {1}'.format(lambda_root, staging_dir))\n local('cp -R {0} {1}'.format(lambda_config_dir, staging_dir))\n\n # Copy the module directory into the staging dir.\n local('cp -R {0} {1}'.format(module_dir, staging_dir))\n\n # Zip the whole thing up, and move it to the builds dir.\n local('cd {0}; zip -r {1} ./*; mv {1} {2}'.format(staging_dir, build_filename, builds_dir))",
"def deploy_app():\r\n upload_and_explode_code_bundle()\r\n symlink_current_release()",
"def deploy():\n test()\n require('hosts', provided_by=servers)\n require('path')\n env.release = time.strftime('%Y-%m-%d-%H-%M')\n upload_tar_from_git()\n install_requirements()\n install_site()\n symlink_current_release()\n migrate()\n collect_static()\n restart_webserver()\n remove_remote_package()",
"def deploy():\n \n # We probably don't need these, remove them if and when we can.\n packages = [\n # 'python-configobj',\n # 'python-dev',\n # 'python-m2crypto',\n # 'python-mysqldb',\n 'python-paramiko',\n 'python-pip',\n 'python-crypto',\n 'python-setuptools',\n # 'python-lxml',\n ]\n\n os.system('apt-get install -y %s' % ' '.join(packages))\n os.system('pip install fabric==0.9.3')\n os.system('cd /opt/pergola/fab && fab initialize')",
"def create_dependency_layer():\n\n # file paths\n requirements_file_path = \"requirements.txt\"\n target_directory = \"python\"\n zip_file_path = \"dependency-layer.zip\"\n\n # change directory so that relative paths work\n cwd = os.getcwd()\n os.chdir(\"lambda\")\n\n # create new dependency zip only if it doesn't exist\n if not os.path.isfile(zip_file_path):\n\n pip_main(\n [\n \"install\",\n \"-r\",\n requirements_file_path,\n \"--target\",\n target_directory,\n ]\n )\n\n # package dependencies as a zip file\n dep_zip = zipfile.ZipFile(zip_file_path, \"w\", zipfile.ZIP_DEFLATED)\n\n for root, dirs, files in os.walk(target_directory):\n for file in files:\n dep_zip.write(os.path.join(root, file))\n\n dep_zip.close()\n\n # change directory back\n os.chdir(cwd)",
"def functionapp_deploy(app_name=None, repository=None, skip_secrets_generation=False,\n branch_name=None, do_not_wait=False):\n repo_name, repository = resolve_repository(repository)\n\n get_github_pat_token(token_prefix=functionapp_token_prefix + repo_name, display_warning=True)\n logger.warning('Setting up your workflow.')\n\n languages = get_languages_for_repo(repo_name)\n if not languages:\n raise CLIError('Language detection failed for this repository.')\n language = choose_supported_language(languages)\n if language:\n logger.warning('%s repository detected.', language)\n else:\n logger.debug('Languages detected : %s', languages)\n raise CLIError('The languages in this repository are not yet supported from up command.')\n\n # assuming the host.json is in the root directory for now\n # Todo - atbagga\n ensure_function_app(repo_name=repo_name)\n\n from azext_aks_deploy.dev.common.azure_cli_resources import get_functionapp_details\n app_details = get_functionapp_details(app_name)\n logger.debug(app_details)\n app_name = app_details['name']\n\n # create azure service principal and display json on the screen for user to configure it as Github secrets\n if not skip_secrets_generation:\n get_azure_credentials_functionapp(app_name)\n\n print('')\n files = get_functionapp_yaml_template_for_repo(app_name, repo_name)\n\n # File checkin\n for file_name in files:\n logger.debug(\"Checkin file path: %s\", file_name.path)\n logger.debug(\"Checkin file content: %s\", file_name.content)\n\n default_branch = get_default_branch(repo_name)\n workflow_commit_sha = push_files_to_repository(\n files=files, default_branch=default_branch, repo_name=repo_name, branch_name=branch_name,\n message=CHECKIN_MESSAGE_FUNCTIONAPP)\n print('Creating workflow...')\n check_run_id = get_work_flow_check_runID(repo_name, workflow_commit_sha)\n workflow_url = 'https://github.com/{repo_id}/runs/{checkID}'.format(repo_id=repo_name, checkID=check_run_id)\n print('GitHub Action workflow has been created - {}'.format(workflow_url))\n\n if not do_not_wait:\n poll_workflow_status(repo_name, check_run_id)",
"def build_archive(mod, cache):\n\n mod_pathname = os.path.abspath(os.path.dirname(__file__) + \"/../lambdas/{}.py\".format(mod))\n awsflow_basedir = os.path.abspath(os.path.dirname(__file__) + \"/../../\")\n\n pkg_dir_suffix = \".lambda\"\n\n if cache:\n # Instead of generating a new temporary directory, reuse the existing one if existing,\n # so that we can avoid re-downloading all the dependencies again. this saves lots of time.\n # The cache is valid for any lamda function defined internally in the awsflow package.\n pkg_dir = \"/tmp/awsflow{}-{}\".format(pkg_dir_suffix, cache)\n\n # check if package directory is empty.\n pkg_dir_empty = not os.path.exists(pkg_dir)\n\n # make sure that the directory exists.\n local(\"mkdir -p {}\".format(pkg_dir))\n else:\n pkg_dir = mkdtemp(pkg_dir_suffix)\n\n logging.info(\"Assembling archive for lambda function ...\")\n\n local('cp {mod_pathname} {pkg_dir}'.format(mod_pathname=mod_pathname, pkg_dir=pkg_dir))\n\n if not cache or pkg_dir_empty:\n local('pip-3.6 install {awsflow_basedir} --find-links {awsflow_basedir} --target {pkg_dir} --upgrade'.format(\n awsflow_basedir=awsflow_basedir, pkg_dir=pkg_dir))\n else:\n logging.info(\"Using cached package directory\")\n\n local('cp -r {awsflow_basedir}/awsflow {pkg_dir}'.format(awsflow_basedir=awsflow_basedir,\n pkg_dir=pkg_dir))\n make_archive(base_name=pkg_dir, format='zip', root_dir=pkg_dir)\n\n logging.info(\"Archive ready.\")\n\n archive_contents = open('{}.zip'.format(pkg_dir), \"rb\").read()\n\n if not cache:\n local(\"rm -rf {pkg_dir}.zip {pkg_dir}\".format(pkg_dir=pkg_dir))\n\n return archive_contents",
"def deploy_eis_app():",
"def deploy(ctx):\n ctx.run(\"rm -rf build/* dist/*\")\n # ctx.run(\"bumpversion {bump} --verbose\")\n ctx.run(\"python3 setup.py sdist bdist_wheel\")\n ctx.run(\"python3 -m twine check dist/*\")\n ctx.run(\"python3 -m twine upload dist/*\")\n # ctx.run(\"git push origin --tags\")\n # ctx.run(\"git push kristy --tags\")",
"def _deploy_release():\n require('hosts')\n require('path')\n symlink_current_release()\n install_requirements()\n install_site()\n migrate()\n restart_webserver()",
"async def create_package(self) -> None:\n try:\n package_path = self._config.package_path\n except ImportError:\n self._logger.info('Creating package `%s`', self._config.package)\n package_path = join(os.getcwd(), self._config.package)\n\n touch(join(package_path, '__init__.py'))\n\n models_path = join(package_path, 'models.py')\n if not exists(models_path):\n template = load_template('models.py')\n models_code = template.render()\n write(models_path, models_code)\n\n for subpackage in ('handlers', 'hooks'):\n subpackage_path = join(self._config.package_path, subpackage)\n touch(join(subpackage_path, '__init__.py'))\n\n sql_path = join(self._config.package_path, 'sql')\n touch(join(sql_path, '.keep'))\n\n graphql_path = join(self._config.package_path, 'graphql')\n touch(join(graphql_path, '.keep'))",
"def create_helloworld_just_deployment(manager, logger, tenant=None):\n upload_helloworld(\n manager,\n 'test-ec2-bp.yaml',\n NOINSTALL_BLUEPRINT_ID,\n tenant,\n logger,\n )\n\n inputs = {\n 'image_id': 'does not matter',\n }\n\n deploy_helloworld(\n manager,\n inputs,\n NOINSTALL_BLUEPRINT_ID,\n NOINSTALL_DEPLOYMENT_ID,\n tenant,\n logger,\n )",
"def deploy(test=True):\n run('python setup.py sdist upload -r test')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uploads Lambda deployment package to S3 | def upload_lambda():
s3 = session.resource('s3')
try:
s3.Bucket(f"lambda-source-{os.environ['AWS_ACCOUNT']}").upload_file('./build/lambda.zip', 'lambda.zip')
print("Lambda deployment package uploaded to S3!")
except Exception as e:
print(f"Error uploading deployment package. Exception: {e}.") | [
"def _deploy_to_s3():\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt put gzip/ %s'\n\n for bucket in env.s3_buckets:\n env.s3_bucket = bucket\n local(s3cmd % ('s3://%(s3_bucket)s/' % env))\n local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))",
"def _package_and_upload(\n scratch_bucket: str, package_name: str, project_path: str\n) -> str:\n log.info(\"Packaging '%s'\", project_path)\n zip_filename = \"{}_{}.zip\".format(package_name, socket.gethostname())\n full_zip_filename = tempfile.mkstemp(\".zip\")[1]\n project_path = os.path.abspath(project_path)\n log.debug(\"project_path is %s\", project_path)\n log_archive = ZipFile(full_zip_filename, \"w\")\n\n files = []\n for root, directories, filenames in os.walk(\n project_path, followlinks=False, topdown=True\n ):\n # skip everything starting with a .\n if any([l.startswith(\".\") for l in root.split(\"/\")]):\n continue\n\n for filename in filenames:\n if not filename.startswith(\".\"):\n full_filename = os.path.join(root, filename)\n # ignore symlinks\n if not os.path.islink(full_filename):\n files.append(full_filename)\n\n for f in files:\n write_filename = f.replace(project_path, \"\")\n write_filename = package_name + write_filename\n log_archive.write(f, arcname=write_filename)\n log_archive.close()\n\n if len(files) == 0:\n raise RuntimeError(\"No files found in '%s'\" % project_path)\n s3_resource = boto3.resource(\"s3\")\n b = s3_resource.Bucket(scratch_bucket) # pylint: disable=no-member\n s3_path = \"builds/\" + zip_filename\n zip_size = os.stat(full_zip_filename).st_size\n if zip_size > 50e6:\n log.warn(f\"upload size is {int(zip_size/1e6)} MB\")\n b.upload_file(full_zip_filename, s3_path)\n os.remove(full_zip_filename)\n log.info(\"Uploaded %s to %s (%s files)\", zip_filename, s3_path, len(files))\n s3_client = boto3.client(\"s3\")\n url = s3_client.generate_presigned_url(\n ClientMethod=\"get_object\",\n Params={\"Bucket\": scratch_bucket, \"Key\": s3_path},\n ExpiresIn=EXPIRATION_SECONDS,\n )\n return url",
"def deploy_assets_to_s3():\r\n# run('s3cmd del --recursive s3://%(s3_bucket)s/%(application)s/%(admin_media_prefix)s/' % env)\r\n# run('s3cmd -P --guess-mime-type sync %(venv_path)s/src/django/django/contrib/admin/media/ s3://%(s3_bucket)s/%(application)s/%(site_media_prefix)s/' % env)\r\n# run('s3cmd del --recursive s3://%(s3_bucket)s/%(application)s/%(newsapps_media_prefix)s/' % env)\r\n# run('s3cmd -P --guess-mime-type sync %(venv_path)s/src/newsapps/newsapps/na_media/ s3://%(s3_bucket)s/%(application)s/%(newsapps_media_prefix)s/' % env)\r\n pass",
"def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e",
"def deploy_to_s3(self):\r\n self.tempdir = tempfile.mkdtemp('s3deploy')\r\n\r\n for keyname, absolute_path in self.find_file_paths():\r\n self.s3_upload(keyname, absolute_path)\r\n\r\n shutil.rmtree(self.tempdir, True)\r\n return True",
"def deploy_website_to_target_bucket(event, context, target_bucket, files):\n\n print(f'Starting admin website deployment to {target_bucket} bucket')\n\n try: \n for webSiteFile in files:\n with open(webSiteFile) as f:\n content = f.read()\n\n encoded_string = content.encode(\"utf-8\")\n website_key = os.path.relpath(webSiteFile, '/tmp/website-contents') \n guessed_mime_type = mimetypes.guess_type(webSiteFile)\n \n if website_key.startswith('../'):\n file_key = website_key[len('../'):]\n else:\n file_key = website_key\n \n print('Key being uploaded to S3: ' + file_key)\n\n if guessed_mime_type is None:\n raise Exception(\"Failed to guess mimetype\")\n \n mime_type = guessed_mime_type[0] \n \n if mime_type is None:\n mime_type = 'binary/octet-stream'\n \n s3.Bucket(target_bucket).put_object(\n Key=file_key, \n Body=encoded_string,\n ContentType=mime_type\n )\n\n print(f'{file_key} uploaded to {target_bucket}')\n\n print(f'Admin website deployed successfully to {target_bucket} bucket') \n except ClientError as ex: \n print(f'Target Bucket {target_bucket} with error: {ex}') \n cfnResponse.send(event, context, cfnResponse.FAILED, {}, \"CustomResourcePhysicalID\")",
"def create_or_update_s3_bucket(aws_account, lambda_zip_filename):\n # ensure S3 bucket exists\n s3_client = boto3.client('s3', region_name=DEFAULT_REGION)\n ebs_bucket_name = 'ebs-snapper-{}'.format(aws_account)\n LOG.info(\"Creating S3 bucket %s if it doesn't exist\", ebs_bucket_name)\n s3_client.create_bucket(\n ACL='private',\n Bucket=ebs_bucket_name)\n\n # upload files to S3 bucket\n LOG.info(\"Uploading files into S3 bucket\")\n upload_files = ['cloudformation.json', lambda_zip_filename]\n for filename in upload_files:\n\n local_hash = None\n try:\n local_hash = md5sum(filename).strip('\"')\n except:\n raise\n\n try:\n # check if file in bucket is already there and up to date\n object_summary = s3_client.get_object(Bucket=ebs_bucket_name, Key=filename)\n\n remote_hash = object_summary['ETag'].strip('\"')\n\n LOG.debug(\"Local file MD5 sum: \" + local_hash)\n LOG.debug(\"ETag from AWS: \" + remote_hash)\n\n if local_hash == remote_hash:\n LOG.info(\"Skipping upload of %s, already up-to-date in S3\", filename)\n continue\n except:\n LOG.info(\"Failed to checksum remote file %s, uploading it anyway\", filename)\n\n with open(filename, 'rb') as data:\n LOG.info('Uploading %s to bucket %s', filename, ebs_bucket_name)\n s3_client.put_object(Bucket=ebs_bucket_name, Key=filename, Body=data)\n\n return ebs_bucket_name",
"def build_lambda():\n\n try:\n os.system(\"mkdir -p ./build\")\n os.system(\"cp -r ./lambda ./build\")\n os.system(\"pip3 install -r ./build/lambda/requirements.txt -t ./build/lambda\")\n shutil.make_archive(\"./build/lambda\", 'zip', \"./build/lambda\")\n os.system(\"rm -rf ./build/lambda\")\n\n print(\"Lambda deployment package built!\")\n\n except Exception as e:\n print(f\"Error building deployment package. Exception: {e}.\")",
"def upload_artifacts(file_name, bucket_name, object_name):\n print('Uploading artifacts to {}/{}'.format(bucket_name, object_name))\n try:\n s3_client.upload_file(file_name, bucket_name, object_name)\n except Exception as e:\n print('Failed to upload artifacts\\nException: {}'.format(e))\n sys.exit(1)\n print('Successfully uploaded artifacts')",
"def upload_bootstrapped_file(self):\n AWSApi.instance().s3.put_object(\n bucket_name=self.name,\n body=\"bucket is configured successfully.\",\n key=\"/\".join([self._root_directory, self._bootstrapped_file_name]),\n )",
"def _generate_lambda(appName, _lambda, roleARN, config, projPath):\n\n if( not os.path.exists(projPath+'/.tmp') ):\n os.mkdir(projPath+'/.tmp')\n\n if( not os.path.isfile(projPath+'/.tmp/dist.zip') ):\n AWSSetup._compress_app_package(\n projPath+'/.tmp/dist',\n projPath+'/.tmp/dist.zip',\n ['.git/']\n )\n\n funcName = appName+'-uxy-app-'+config['app:stage']\n zipFile = open(projPath+'/.tmp/dist.zip', 'rb')\n zipFileBin = zipFile.read()\n zipFile.close()\n\n statusCode = AWSSetup._function_exists(funcName, _lambda)\n if( statusCode == AWSSetup.FUNCTION_NOT_FOUND ):\n runtime = None\n if( config['app:runtime'] == 'go' ):\n runtime = 'go1.x'\n if( config['app:runtime'] == 'python' ):\n runtime = 'python3.9'\n\n AWSSetup._log(\"+ Creating lambda function...\")\n AWSSetup._log(\"+ Runtime: \"+runtime)\n response = _lambda.create_function(\n FunctionName = funcName,\n Runtime = runtime,\n Role = roleARN,\n Handler = config['aws:config']['lambda:handler'],\n Code = {\n 'ZipFile' : zipFileBin\n },\n Timeout = config['aws:config']['lambda:timeout']\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n elif ( statusCode == AWSSetup.FUNCTION_FOUND ):\n AWSSetup._log('+ Updating lambda function...')\n response = _lambda.update_function_code(\n FunctionName = funcName,\n ZipFile = zipFileBin\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n else:\n AWSSetup._log('=> ERROR: error getting lambda function')\n response = {}\n\n\n return response",
"def package_deploy(ctx):\n ctx.run(\"twine upload dist/*\")",
"def s3bucket(ec2, env, source):\n\tmime_types = {\n\t\t\"eot\" : \"application/vnd.ms-fontobject\",\n\t\t\"ttf\" : \"font/truetype\",\n\t\t\"otf\" : \"font/opentype\",\n\t\t\"woff\": \"font/woff\",\n\t}\n\ts3b = boto.connect_s3(ec2.access_key,ec2.secret_key)\n\tfor machine in env:\n\t\tif 's3bucket' in machine.keys():\n\t\t\tprint 'Copying static media for %s' % machine['name']\n\t\t\ts3bucket = machine['s3bucket']\n\n\t\t\t# Get the expires\n\t\t\ttime_format = '%a, %d %b %Y %H:%M:%S'\n\t\t\tnow = datetime.datetime.now().strftime(time_format)\n\t\t\texpires = s3bucket.get('expires',datetime.datetime.utcnow().strftime(time_format))\n\t\t\ttry:\n\t\t\t\tdatetime.datetime.strptime(expires,time_format)\n\t\t\texcept:\n\t\t\t\terror('Improperly formatted datetime: %s' % expires)\n\n\t\t\t# Get or create bucket using the name\n\t\t\tname = s3bucket.get('name','s3%s'%machine['name'])\n\t\t\ttry: b = s3b.get_bucket(name)\n\t\t\texcept: b = s3b.create_bucket(name)\n\t\t\t\n\t\t\t# Set ACL Public for all items in the bucket\n\t\t\tb.set_acl('public-read')\n\n\t\t\tk = Key(b)\n\t\t\tstatic_dir = os.path.join(source,'project','static')\n\t\t\tfor root, dirs, files in os.walk(static_dir):\n\t\t\t\tif '.svn' in dirs: dirs.remove('.svn')\n\t\t\t\tkey_root = root.split('static')[1]\n\n\t\t\t\tfor file in files:\n\t\t\t\t\tfilename = os.path.join(root,file)\n\n\t\t\t\t\t# Set the headers\n\t\t\t\t\theaders = {'Expires':expires}\n\t\t\t\t\tif '.gz' in file:\n\t\t\t\t\t\theaders.update({'Content-Encoding':'gzip'})\n\n\t\t\t\t\tif os.path.isfile(filename):\n\t\t\t\t\t\t# Set the mime-type\n\t\t\t\t\t\text = file.split('.')[-1]\n\t\t\t\t\t\tif ext in mime_types.keys():\n\t\t\t\t\t\t\tk.content_type = mime_types[ext]\n\n\t\t\t\t\t\t# Send the file\n\t\t\t\t\t\tk.key = os.path.join(key_root,file)\n\t\t\t\t\t\tprint '\\nTransfering %s' % filename\n\t\t\t\t\t\tk.set_contents_from_filename(filename, headers=headers, cb=s3_percent_cb, num_cb=10)\n\t\t\tprint '\\nTransfer complete'\n\n\tinvalidate_cache(ec2, env, source)",
"def lambda_handler(event, context):\n\n\n record_gen = fetch_record(event)\n image_dir = os.environ.get(\"IMAGE_DIR\", \"/tmp\")\n\n client = boto3.client(\"s3\", endpoint_url=os.environ.get(\"S3_ENDPOINT\", None))\n\n try:\n for bucket, objkey in record_gen:\n # downalod\n ret = download_file(client, bucket, objkey, image_dir)\n if ret is not None:\n return json.dumps(ret)\n \n # create thumbnail\n ret = create_thumbnail(objkey, image_dir)\n if ret is not None:\n return json.dumps(ret)\n\n ret = upload_file(client, bucket, objkey, image_dir)\n if ret is not None:\n return json.dumps(ret)\n finally:\n filename = os.path.join(image_dir, objkey.split(\"/\")[-1])\n if os.path.exists(filename):\n os.remove(filename)\n\n return json.dumps({\"status\" : 200})",
"def create_bucket():\n\n s3 = session.resource('s3')\n\n try:\n s3.create_bucket(Bucket=f\"lambda-source-{os.environ['AWS_ACCOUNT']}\", ACL='private')\n print('Created S3 bucket!')\n\n except Exception as e:\n print(f\"Error creating S3 bucket. Exception: {e}.\")",
"def sync_s3():\n\n subprocess.run([\"aws\", \"s3\", \"sync\", \"./\", \"s3://{0}\".format(BLOG_BUCKET_NAME)])",
"def deploy():\n require('settings', provided_by=[production, staging])\n\n render()\n _gzip_www()\n _deploy_to_s3()",
"def download_lambda_handler(event: Dict[str, Any], _) -> str:\n LOGGER.info('Invoked with event %s', event)\n\n binary = CARBON_BLACK.select(Binary, event['md5'])\n download_path = _download_from_carbon_black(binary)\n metadata = _build_metadata(binary)\n s3_object_key = _upload_to_s3(binary.md5, download_path, metadata)\n\n # Truncate and remove the downloaded file (os.remove does not work as expected in Lambda).\n with open(download_path, 'w') as file:\n file.truncate()\n os.remove(download_path)\n\n return s3_object_key",
"def _UploadToStageBucket(region, function_name, zip_file_path, stage_bucket):\n dest_object = storage_util.ObjectReference.FromBucketRef(\n storage_util.BucketReference.FromArgument(stage_bucket),\n '{}-{}-{}.zip'.format(\n region, function_name,\n ''.join(random.choice(string.ascii_lowercase) for _ in range(12))))\n storage_api.StorageClient().CopyFileToGCS(zip_file_path, dest_object)\n return dest_object"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
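The Lambda deployment snippets in the surrounding rows (upload_lambda, update_lambda, create_bucket, delete_bucket) all reference a module-level session and os.environ['AWS_ACCOUNT'] that are never defined in the rows themselves. A minimal setup sketch, assuming boto3; both the region and the account id below are placeholders, not values taken from the dataset:

import os
import boto3

# Assumed module-level context for the snippets in these rows; values are placeholders.
os.environ.setdefault('AWS_ACCOUNT', '123456789012')   # hypothetical account id
session = boto3.Session(region_name='us-east-1')       # assumed region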
Publishes a new Lambda version | def update_lambda():
client = session.client('lambda')
try:
client.update_function_code(
FunctionName='process_csv',
            S3Key='lambda.zip',
S3Bucket=f"lambda-source-{os.environ['AWS_ACCOUNT']}",
Publish=True
)
print("Lambda function published!")
except Exception as e:
print(f"Error publishing lambda. Exception: {e}.") | [
"def upload_lambda():\n\n s3 = session.resource('s3')\n\n try:\n s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\").upload_file('./build/lambda.zip', 'lambda.zip')\n print(\"Lambda deployment package uploaded to S3!\")\n\n except Exception as e:\n print(f\"Error uploading deployment package. Exception: {e}.\")",
"def update_lambda(self):\n\n try:\n AWSSetup._update_lambda(self.appName, self._lambda, self.config)\n except Exception as e:\n AWSSetup._log(str(e))\n AWSSetup._log(\"Failed to update application code.\")",
"def build_lambda():\n\n try:\n os.system(\"mkdir -p ./build\")\n os.system(\"cp -r ./lambda ./build\")\n os.system(\"pip3 install -r ./build/lambda/requirements.txt -t ./build/lambda\")\n shutil.make_archive(\"./build/lambda\", 'zip', \"./build/lambda\")\n os.system(\"rm -rf ./build/lambda\")\n\n print(\"Lambda deployment package built!\")\n\n except Exception as e:\n print(f\"Error building deployment package. Exception: {e}.\")",
"def update_function_and_version(ebs_bucket_name, lambda_zip_filename):\n lambda_client = boto3.client('lambda', region_name=DEFAULT_REGION)\n lambda_function_list = lambda_client.list_functions()\n lambda_function_map = dict()\n for entry in lambda_function_list['Functions']:\n if 'ebs-snapper' in entry['FunctionName']:\n lambda_function_map[entry['FunctionName']] = entry\n\n if len(lambda_function_map.keys()) > 0:\n LOG.info(\"EBS Snapper functions found: %s\", lambda_function_map.keys())\n else:\n LOG.warn('No EBS snapshot functions were found.')\n LOG.warn('Please check that EBS snapper stack exists on this account.')\n\n bytes_read = open(lambda_zip_filename, \"rb\").read()\n existing_hash = base64.b64encode(hashlib.sha256(bytes_read).digest())\n\n # publish new version / activate them\n for function_name in lambda_function_map.keys():\n # cleanup opportunity, only retain last 2 versions\n versions_found = []\n version_list = lambda_client.list_versions_by_function(FunctionName=function_name)\n for function_info in version_list['Versions']:\n if function_info['Version'] == '$LATEST':\n continue\n\n versions_found.append(long(function_info['Version']))\n\n if len(versions_found) > 2:\n LOG.warn('Found more than 2 old versions of EBS Snapper. Cleaning.')\n try:\n # take off those last 2\n versions_found.sort()\n versions_found.pop()\n versions_found.pop()\n\n for v in versions_found:\n LOG.warn('Removing %s function version %s...',\n function_name,\n str(v))\n lambda_client.delete_function(\n FunctionName=function_name,\n Qualifier=str(v)\n )\n except:\n LOG.warn('EBS Snapper cleanup failed!')\n\n new_hash = lambda_function_map[function_name]['CodeSha256']\n\n if existing_hash == new_hash:\n LOG.info('Skipping %s, as it is already up to date', function_name)\n continue\n\n update_response = lambda_client.update_function_code(\n FunctionName=function_name,\n S3Bucket=ebs_bucket_name,\n S3Key=lambda_zip_filename,\n Publish=True\n )\n LOG.info(\"Updated function code for %s: %s\",\n function_name, update_response['ResponseMetadata'])\n\n publish_response = lambda_client.publish_version(\n FunctionName=function_name,\n CodeSha256=update_response['CodeSha256'],\n Description=str(ebs_snapper.__version__)\n )\n LOG.info(\"Published new version for %s: %s\",\n function_name, publish_response['ResponseMetadata'])",
"def _generate_lambda(appName, _lambda, roleARN, config, projPath):\n\n if( not os.path.exists(projPath+'/.tmp') ):\n os.mkdir(projPath+'/.tmp')\n\n if( not os.path.isfile(projPath+'/.tmp/dist.zip') ):\n AWSSetup._compress_app_package(\n projPath+'/.tmp/dist',\n projPath+'/.tmp/dist.zip',\n ['.git/']\n )\n\n funcName = appName+'-uxy-app-'+config['app:stage']\n zipFile = open(projPath+'/.tmp/dist.zip', 'rb')\n zipFileBin = zipFile.read()\n zipFile.close()\n\n statusCode = AWSSetup._function_exists(funcName, _lambda)\n if( statusCode == AWSSetup.FUNCTION_NOT_FOUND ):\n runtime = None\n if( config['app:runtime'] == 'go' ):\n runtime = 'go1.x'\n if( config['app:runtime'] == 'python' ):\n runtime = 'python3.9'\n\n AWSSetup._log(\"+ Creating lambda function...\")\n AWSSetup._log(\"+ Runtime: \"+runtime)\n response = _lambda.create_function(\n FunctionName = funcName,\n Runtime = runtime,\n Role = roleARN,\n Handler = config['aws:config']['lambda:handler'],\n Code = {\n 'ZipFile' : zipFileBin\n },\n Timeout = config['aws:config']['lambda:timeout']\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n elif ( statusCode == AWSSetup.FUNCTION_FOUND ):\n AWSSetup._log('+ Updating lambda function...')\n response = _lambda.update_function_code(\n FunctionName = funcName,\n ZipFile = zipFileBin\n )\n AWSSetup._log(\"=> Lambda package deployed\")\n AWSSetup._add_function_permission(appName, _lambda, config)\n else:\n AWSSetup._log('=> ERROR: error getting lambda function')\n response = {}\n\n\n return response",
"def deploy(function_name=None, arn=None):\n if not function_name:\n abort('Must provide function_name')\n if not arn:\n abort('Must provide arn')\n\n lambda_root = os.path.join(LAMBDA_DIR, function_name)\n builds_dir = os.path.join(lambda_root, BUILDS_SUBDIR)\n builds = sorted(glob.glob(os.path.join(builds_dir, '*.{0}'.format('zip'))))\n if not builds:\n abort('No builds exist. Run a `build` command first')\n latest_build = builds[-1]\n logging.info('Preparing to deploy build: {0}'.format(latest_build))\n\n client = boto3.client('lambda')\n with open(latest_build, 'rb') as zip_file:\n response = client.update_function_code(FunctionName=arn, ZipFile=zip_file.read())\n logger.info(json.dumps(response, indent=2))",
"def lambda_handler(event, context):\n \n try:\n repo_name = event['Records'][0]['eventSourceARN'].split(':')[-1]\n reference = event['Records'][0]['codecommit']['references'][0]\n commit_id = reference['commit']\n ref = os.path.split(reference[\"ref\"])\n root = os.path.basename(ref[0])\n created = reference.get(\"created\")\n deleted = reference.get(\"deleted\")\n if created and root == \"heads\" and ref[1] and ref[1] != \"master\":\n data = json.loads(event['Records'][0]['customData'])\n logger.info('Putting updates trigger for branch %s' % ref[1])\n put_trigger(repo_name, ref[1], data)\n pipeline_name = data[\"pipeline_name\"]\n bucket = data[\"bucket\"]\n logger.info('Getting and archiving codecommit repository content')\n codecommit = AWSCodeCommit(cc_client, repo_name, logger)\n commit_info = cc_client.get_commit(\n repositoryName=repo_name, \n commitId=commit_id\n )\n commit_info['commit']['branchName'] = ref[1]\n commit_info['commit']['RepositoryName'] = repo_name\n codecommit.archive(commit_id, {\"commit_info.json\": json.dumps(commit_info, indent=4)})\n s3_client.put_object(Bucket=bucket,\n Key=\"artifacts/%s\" % pipeline_name,\n Body=codecommit.content)\n logger.info('Starting pipeline execution')\n cp_client.start_pipeline_execution(name=pipeline_name)\n if deleted and root == \"heads\" and ref[1] and ref[1] != \"master\":\n logger.info('Poping updates trigger for branch %s' % ref[1])\n pop_trigger(repo_name, ref[1])\n except Exception as e:\n logger.exception(\"An error occured when processing codecommit trigger event : %s\" % str(e), exc_info=1)",
"def commit_release():\n print blue(\"Deploying new release\")\n env.releases.commit()",
"def task_update(mod, func, cache):\n\n cli = boto3.client('lambda')\n\n archive_contents = build_archive(mod, cache)\n\n try:\n res = cli.update_function_code(FunctionName=func,\n ZipFile=archive_contents)\n except Exception as e:\n fatal(\"Operation failed: {}\".format(e))\n\n logging.info(\"Operation completed: {}\".format(res[\"FunctionArn\"]))",
"def download_lambda_handler(event: Dict[str, Any], _) -> str:\n LOGGER.info('Invoked with event %s', event)\n\n binary = CARBON_BLACK.select(Binary, event['md5'])\n download_path = _download_from_carbon_black(binary)\n metadata = _build_metadata(binary)\n s3_object_key = _upload_to_s3(binary.md5, download_path, metadata)\n\n # Truncate and remove the downloaded file (os.remove does not work as expected in Lambda).\n with open(download_path, 'w') as file:\n file.truncate()\n os.remove(download_path)\n\n return s3_object_key",
"def lambda_handler(event, context):\n set_logging(level=logging.INFO)\n logging.debug(\"Initiating public EBS snapshots checking\")\n\n try:\n sns_arn = os.environ[\"SNS_ARN\"]\n config = Config()\n\n if not config.ebsSnapshot.enabled:\n logging.debug(\"Public EBS snapshots checking disabled\")\n return\n\n logging.debug(\"Iterating each account to initiate EBS snapshots checking\")\n for account_id, account_name in config.ebsSnapshot.accounts.items():\n payload = {\"account_id\": account_id,\n \"account_name\": account_name,\n \"regions\": config.aws.regions,\n \"sns_arn\": sns_arn\n }\n logging.debug(f\"Initiating public EBS snapshots checking for '{account_name}'\")\n Sns.publish(sns_arn, payload)\n except Exception:\n logging.exception(\"Error occurred while initiation of public EBS snapshots checking\")\n return\n\n logging.debug(\"Public EBS snapshot checking initiation done\")",
"def release():\n shell(\"python setup.py register\")\n shell(\"python setup.py sdist upload\")",
"def lambda_handler(event, context):\n # First, get access token\n access_token = soundprintutils.get_access_token()\n\n # Initialize Spotify client and query tracks played in the last hour\n spotify = tk.Spotify(access_token)\n current_timestamp_ms = int(datetime.now(tz=timezone.utc).timestamp() * 1000)\n snapshot_begin_timestamp_ms = current_timestamp_ms - 3600*1000\n tracks_df = get_tracks_played_after(spotify, snapshot_begin_timestamp_ms)\n\n # Calculate time spent in listening to each track\n tracks_df = update_listened_to_durations(tracks_df, current_timestamp_ms)\n\n # Upload to S3 as a CSV\n dt = datetime.fromtimestamp(current_timestamp_ms/1000, tz=timezone.utc)\n s3_file_name = f\"{ListenerCommon.FILE_PATH_PREFIX}{dt.year}/{dt.month}/{dt.day}/\" \\\n f\"{dt.hour}-{dt.day}-{dt.month}-{dt.year}.csv\"\n soundprintutils.upload_df_to_s3_csv(df=tracks_df, include_index=False, file_name=s3_file_name)\n\n return s3_file_name",
"def release():\n local(\"twine upload dist/*\")",
"def deploy_app():\r\n upload_and_explode_code_bundle()\r\n symlink_current_release()",
"def deploy(ctx):\n ctx.run(\"rm -rf build/* dist/*\")\n # ctx.run(\"bumpversion {bump} --verbose\")\n ctx.run(\"python3 setup.py sdist bdist_wheel\")\n ctx.run(\"python3 -m twine check dist/*\")\n ctx.run(\"python3 -m twine upload dist/*\")\n # ctx.run(\"git push origin --tags\")\n # ctx.run(\"git push kristy --tags\")",
"def APIGWProxy(app: Flask):\n return lambda event, context: aws_lambda_wsgi.response(app, event, context)",
"def build_archive(mod, cache):\n\n mod_pathname = os.path.abspath(os.path.dirname(__file__) + \"/../lambdas/{}.py\".format(mod))\n awsflow_basedir = os.path.abspath(os.path.dirname(__file__) + \"/../../\")\n\n pkg_dir_suffix = \".lambda\"\n\n if cache:\n # Instead of generating a new temporary directory, reuse the existing one if existing,\n # so that we can avoid re-downloading all the dependencies again. this saves lots of time.\n # The cache is valid for any lamda function defined internally in the awsflow package.\n pkg_dir = \"/tmp/awsflow{}-{}\".format(pkg_dir_suffix, cache)\n\n # check if package directory is empty.\n pkg_dir_empty = not os.path.exists(pkg_dir)\n\n # make sure that the directory exists.\n local(\"mkdir -p {}\".format(pkg_dir))\n else:\n pkg_dir = mkdtemp(pkg_dir_suffix)\n\n logging.info(\"Assembling archive for lambda function ...\")\n\n local('cp {mod_pathname} {pkg_dir}'.format(mod_pathname=mod_pathname, pkg_dir=pkg_dir))\n\n if not cache or pkg_dir_empty:\n local('pip-3.6 install {awsflow_basedir} --find-links {awsflow_basedir} --target {pkg_dir} --upgrade'.format(\n awsflow_basedir=awsflow_basedir, pkg_dir=pkg_dir))\n else:\n logging.info(\"Using cached package directory\")\n\n local('cp -r {awsflow_basedir}/awsflow {pkg_dir}'.format(awsflow_basedir=awsflow_basedir,\n pkg_dir=pkg_dir))\n make_archive(base_name=pkg_dir, format='zip', root_dir=pkg_dir)\n\n logging.info(\"Archive ready.\")\n\n archive_contents = open('{}.zip'.format(pkg_dir), \"rb\").read()\n\n if not cache:\n local(\"rm -rf {pkg_dir}.zip {pkg_dir}\".format(pkg_dir=pkg_dir))\n\n return archive_contents",
"def deploy_new_revision():\n try:\n client = boto3.client('codedeploy', config=BOTO3_CONFIG))\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n\n try:\n response = client.create_deployment(\n applicationName=str(os.getenv('APPLICATION_NAME')),\n deploymentGroupName=str(os.getenv('DEPLOYMENT_GROUP_NAME')),\n revision={\n 'revisionType': 'S3',\n 's3Location': {\n 'bucket': os.getenv('S3_BUCKET'),\n 'key': BUCKET_KEY,\n 'bundleType': 'zip'\n }\n },\n deploymentConfigName=str(os.getenv('DEPLOYMENT_CONFIG')),\n description='New deployment from BitBucket',\n ignoreApplicationStopFailures=True\n )\n except ClientError as err:\n print(\"Failed to deploy application revision.\\n\" + str(err))\n return False \n \n \"\"\"\n Wait for deployment to complete\n \"\"\"\n while 1:\n try:\n deploymentResponse = client.get_deployment(\n deploymentId=str(response['deploymentId'])\n )\n deploymentStatus=deploymentResponse['deploymentInfo']['status']\n if deploymentStatus == 'Succeeded':\n print (\"Deployment Succeeded\")\n return True\n elif (deploymentStatus == 'Failed') or (deploymentStatus == 'Stopped') :\n print (\"Deployment Failed\")\n return False\n elif (deploymentStatus == 'InProgress') or (deploymentStatus == 'Queued') or (deploymentStatus == 'Created'):\n deploymentCounter += 1\n deploymentDelay = (deploymentCounter * DEPLOYMENT_BACKOFF_SECS)\n print(\"Deployment \" + deploymentStatus + \" (Exponential back off \" + str(deploymentDelay) + \"s)\")\n sleep(deploymentDelay)\n continue\n except ClientError as err:\n print(\"Failed to deploy application revision.\\n\" + str(err))\n return False \n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
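A hedged follow-up sketch for the row above: when update_function_code is called with Publish=True, the response carries the new version number, which can then be attached to an alias. The alias name 'live', and the assumption that this alias already exists, are illustrative only and not part of the dataset.

import os
import boto3

session = boto3.Session()            # assumed, as in the rows above
client = session.client('lambda')
response = client.update_function_code(
    FunctionName='process_csv',
    S3Bucket=f"lambda-source-{os.environ['AWS_ACCOUNT']}",
    S3Key='lambda.zip',
    Publish=True,
)
# Point an existing alias at the freshly published version (alias name is hypothetical).
client.update_alias(
    FunctionName='process_csv',
    Name='live',
    FunctionVersion=response['Version'],
)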
Creates S3 Bucket for Lambda source code | def create_bucket():
s3 = session.resource('s3')
try:
s3.create_bucket(Bucket=f"lambda-source-{os.environ['AWS_ACCOUNT']}", ACL='private')
print('Created S3 bucket!')
except Exception as e:
print(f"Error creating S3 bucket. Exception: {e}.") | [
"def create_bucket(self):\n AWSApi.instance().s3.create_bucket(bucket_name=self.name, region=self.region)",
"def bucket_create():\r\n conn = connect_s3()\r\n bucket = conn.create_bucket(BUCKET_NAME, policy='public-read')\r\n bucket.configure_website('index.html', 'error.html')\r\n print 'Bucket %r created.' % BUCKET_NAME",
"def create_asset_bucket(self):\n return s3.Bucket(\n self,\n self.config.get(\"stack_name\") + \"_s3\",\n removal_policy=core.RemovalPolicy.DESTROY\n )",
"def create_bucket(name):\r\n s3.create_bucket(Bucket=name)",
"def s3bucket(ec2, env, source):\n\tmime_types = {\n\t\t\"eot\" : \"application/vnd.ms-fontobject\",\n\t\t\"ttf\" : \"font/truetype\",\n\t\t\"otf\" : \"font/opentype\",\n\t\t\"woff\": \"font/woff\",\n\t}\n\ts3b = boto.connect_s3(ec2.access_key,ec2.secret_key)\n\tfor machine in env:\n\t\tif 's3bucket' in machine.keys():\n\t\t\tprint 'Copying static media for %s' % machine['name']\n\t\t\ts3bucket = machine['s3bucket']\n\n\t\t\t# Get the expires\n\t\t\ttime_format = '%a, %d %b %Y %H:%M:%S'\n\t\t\tnow = datetime.datetime.now().strftime(time_format)\n\t\t\texpires = s3bucket.get('expires',datetime.datetime.utcnow().strftime(time_format))\n\t\t\ttry:\n\t\t\t\tdatetime.datetime.strptime(expires,time_format)\n\t\t\texcept:\n\t\t\t\terror('Improperly formatted datetime: %s' % expires)\n\n\t\t\t# Get or create bucket using the name\n\t\t\tname = s3bucket.get('name','s3%s'%machine['name'])\n\t\t\ttry: b = s3b.get_bucket(name)\n\t\t\texcept: b = s3b.create_bucket(name)\n\t\t\t\n\t\t\t# Set ACL Public for all items in the bucket\n\t\t\tb.set_acl('public-read')\n\n\t\t\tk = Key(b)\n\t\t\tstatic_dir = os.path.join(source,'project','static')\n\t\t\tfor root, dirs, files in os.walk(static_dir):\n\t\t\t\tif '.svn' in dirs: dirs.remove('.svn')\n\t\t\t\tkey_root = root.split('static')[1]\n\n\t\t\t\tfor file in files:\n\t\t\t\t\tfilename = os.path.join(root,file)\n\n\t\t\t\t\t# Set the headers\n\t\t\t\t\theaders = {'Expires':expires}\n\t\t\t\t\tif '.gz' in file:\n\t\t\t\t\t\theaders.update({'Content-Encoding':'gzip'})\n\n\t\t\t\t\tif os.path.isfile(filename):\n\t\t\t\t\t\t# Set the mime-type\n\t\t\t\t\t\text = file.split('.')[-1]\n\t\t\t\t\t\tif ext in mime_types.keys():\n\t\t\t\t\t\t\tk.content_type = mime_types[ext]\n\n\t\t\t\t\t\t# Send the file\n\t\t\t\t\t\tk.key = os.path.join(key_root,file)\n\t\t\t\t\t\tprint '\\nTransfering %s' % filename\n\t\t\t\t\t\tk.set_contents_from_filename(filename, headers=headers, cb=s3_percent_cb, num_cb=10)\n\t\t\tprint '\\nTransfer complete'\n\n\tinvalidate_cache(ec2, env, source)",
"def create_bucket(bucket_name):\n print('Creating artifacts bucket {}'.format(bucket_name))\n if bucket_exists(bucket_name):\n print('Bucket {} already exists'.format(bucket_name))\n return\n try:\n if args.region is None or args.region == \"us-east-1\":\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n location = {'LocationConstraint': args.region}\n s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)\n except Exception as e:\n print('Failed to create artifacts bucket\\nException: {}'.format(e))\n sys.exit(1)\n print('Successfully created artifacts bucket')",
"def test_s3_bucket_creation():\n s3 = boto3.resource(\"s3\") # Will use Localstack\n assert len(list(s3.buckets.all())) == 0\n bucket = s3.Bucket(\"foobar\")\n bucket.create()",
"def test_s3_bucket_creation(self, noobaa_obj, created_buckets):\n\n bucketname = create_unique_resource_name(self.__class__.__name__.lower(), 's3-bucket')\n logger.info(f'Creating new bucket - {bucketname}')\n created_buckets.append(noobaa_obj.s3_create_bucket(bucketname=bucketname))",
"def amazon_bucket(self):\n try:\n s3_connection = S3Connection(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY)\n except Exception as e:\n raise StandardError(\"The attempt to connect amazon s3 cloud has been failed\")\n\n try:\n print S3_BUCKET_NAME\n bucket = s3_connection.get_bucket(S3_BUCKET_NAME)\n \n except S3ResponseError as e:\n print \"The bucket you are trying to connect doesnt exists yet, \\\n Trying to create the bucket required to store the relevant images\"\n bucket = s3_connection.create_bucket(S3_BUCKET_NAME)\n\n return bucket",
"def _deploy_to_s3():\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt put gzip/ %s'\n\n for bucket in env.s3_buckets:\n env.s3_bucket = bucket\n local(s3cmd % ('s3://%(s3_bucket)s/' % env))\n local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))",
"def create_or_update_s3_bucket(aws_account, lambda_zip_filename):\n # ensure S3 bucket exists\n s3_client = boto3.client('s3', region_name=DEFAULT_REGION)\n ebs_bucket_name = 'ebs-snapper-{}'.format(aws_account)\n LOG.info(\"Creating S3 bucket %s if it doesn't exist\", ebs_bucket_name)\n s3_client.create_bucket(\n ACL='private',\n Bucket=ebs_bucket_name)\n\n # upload files to S3 bucket\n LOG.info(\"Uploading files into S3 bucket\")\n upload_files = ['cloudformation.json', lambda_zip_filename]\n for filename in upload_files:\n\n local_hash = None\n try:\n local_hash = md5sum(filename).strip('\"')\n except:\n raise\n\n try:\n # check if file in bucket is already there and up to date\n object_summary = s3_client.get_object(Bucket=ebs_bucket_name, Key=filename)\n\n remote_hash = object_summary['ETag'].strip('\"')\n\n LOG.debug(\"Local file MD5 sum: \" + local_hash)\n LOG.debug(\"ETag from AWS: \" + remote_hash)\n\n if local_hash == remote_hash:\n LOG.info(\"Skipping upload of %s, already up-to-date in S3\", filename)\n continue\n except:\n LOG.info(\"Failed to checksum remote file %s, uploading it anyway\", filename)\n\n with open(filename, 'rb') as data:\n LOG.info('Uploading %s to bucket %s', filename, ebs_bucket_name)\n s3_client.put_object(Bucket=ebs_bucket_name, Key=filename, Body=data)\n\n return ebs_bucket_name",
"def create_bucket(self):\n # Cohesity doesn't allow to create a bucket natively from s3 client.\n # response = s3_client.create_bucket(Bucket='my-bucket')\n\n # We create a view with s3Only access, since if it's multiprotocol,\n # bucket becomes readonly access for s3.\n body = View()\n body.view_box_id = self._get_storage_domain_id()\n body.name = BUCKET_NAME\n body.protocol_access = ProtocolAccessEnum.KS3ONLY\n self.cohesity_client.views.create_view(body)\n print(\"Bucket %s created on Cohesity.\" % BUCKET_NAME)",
"def create_bucket(name, policy=None):\n s3 = boto3.client('s3')\n\n s3.create_bucket(Bucket=bucket)\n print(\"S3 bucket %s created.\" % bucket)\n\n if policy:\n s3.put_bucket_policy(\n Bucket=bucket,\n Policy=json.dumps(bucketPolicy)\n )\n print(\"Policy attached to S3 bucket.\")\n\n return bucket",
"def create_bucket(bucket_name, region=None):\n logging.info(\"creating bucket %s, %s\", bucket_name, region)\n # Create bucket\n bucket = None\n\n try:\n if region is None:\n s3_client = boto3.client('s3')\n\n bucket = s3_client.create_bucket(Bucket=bucket_name)\n\n else:\n s3_client = boto3.client('s3', region_name=region)\n location = {'LocationConstraint': region}\n bucket = s3_client.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration=location)\n except ClientError as e:\n logging.error(e)\n\n return bucket",
"def create_bucket():\n # GATHER NAMING INFORMATION\n first_name = input('Enter your first name: ').lower()\n last_name = input('Enter your last name: ').lower()\n ran_num = f'{randint(100000, 999999)}'\n bucket_name = f'{first_name}{last_name}{ran_num}'\n\n if len(f'{first_name}{last_name}') == 0:\n input('No name detected. Press enter to go back to the main menu.')\n return\n\n # CREATE BUCKET\n s3.create_bucket(Bucket=bucket_name)\n\n # CONFIRMATION\n if s3.Bucket(bucket_name) in s3.buckets.all():\n print(f'Bucket \\'{bucket_name}\\' created successfully!\\n')\n else:\n print('Uh oh. Something went wrong...\\n')\n\n input('Press enter to continue.\\n')",
"def upload_lambda():\n\n s3 = session.resource('s3')\n\n try:\n s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\").upload_file('./build/lambda.zip', 'lambda.zip')\n print(\"Lambda deployment package uploaded to S3!\")\n\n except Exception as e:\n print(f\"Error uploading deployment package. Exception: {e}.\")",
"def delete_bucket():\n\n s3 = session.resource('s3')\n\n try:\n bucket = s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\")\n bucket.objects.all().delete()\n bucket.delete()\n print('Deleted S3 bucket!')\n\n except Exception as e:\n print(f\"Error deleting S3 bucket. Exception: {e}.\")",
"def create_s3(self, name, bucket, access_key, secret_access_key, endpoint=None, region=None,\n signature_version=None):\n\n config = {\n 'bucket': bucket,\n 'accessKey': access_key,\n 'secretAccessKey': secret_access_key,\n }\n if endpoint:\n config['endpoint'] = endpoint\n if region:\n config['region'] = region\n if signature_version:\n config['signatureVersion'] = signature_version\n\n storage_provider = models.StorageProvider(\n type='s3',\n name=name,\n config=config,\n )\n\n repository = self.build_repository(repositories.CreateStorageProvider)\n return repository.create(storage_provider)",
"def connect_s3(self):\n self.out('- Connecting to S3 and making bucket.\\n')\n self.s3 = boto.connect_s3()\n self.bucket = self.s3.create_bucket(self.bucket_name)\n self.bucket = self.s3.get_bucket(self.bucket_name)\n self.bucket.set_acl(self.default_acl)\n self.bucket.set_cors(self.default_cors)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
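One caveat worth noting for the create_bucket row above: calling create_bucket with only Bucket and ACL succeeds in us-east-1, but other regions require an explicit LocationConstraint (the same pattern appears in one of the negatives). A hedged variant; the region below is chosen purely for illustration:

import os
import boto3

session = boto3.Session()            # assumed, as in the rows above
s3 = session.resource('s3')
s3.create_bucket(
    Bucket=f"lambda-source-{os.environ['AWS_ACCOUNT']}",
    ACL='private',
    CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'},  # region is an assumption
)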
Deletes S3 Bucket for Lambda source code | def delete_bucket():
s3 = session.resource('s3')
try:
bucket = s3.Bucket(f"lambda-source-{os.environ['AWS_ACCOUNT']}")
bucket.objects.all().delete()
bucket.delete()
print('Deleted S3 bucket!')
except Exception as e:
print(f"Error deleting S3 bucket. Exception: {e}.") | [
"def bucket_delete():\r\n if not confirm(\"Are you sure you want to delete the bucket %r?\" % BUCKET_NAME):\r\n abort('Aborting at user request.')\r\n conn = connect_s3()\r\n conn.delete_bucket(BUCKET_NAME)\r\n print 'Bucket %r deleted.' % BUCKET_NAME",
"def delete_bucket(self):\n self.s3_client.delete_bucket(Bucket=BUCKET_NAME)\n print(\"Deleted Bucket: %s\" % BUCKET_NAME)",
"def s3cleanup(request):\n s3interface = S3Interface()\n\n deleted = s3interface.delete_all_images()\n print('Deleted %d object(s) from S3 bucket \"%s\" using prefix \"%s\"' % (\n len(deleted), s3interface.bucket_name, s3interface.prefix))",
"def delete(self):\n\n # TODO: Make sure the proper exceptions are raised.\n\n return self.connection.delete_bucket(self.name)",
"def handle_DELETE(request):\n if boto:\n bucket_name = request.REQUEST.get('bucket')\n key_name = request.REQUEST.get('key')\n s3_delete(key_name)\n return make_response(200)\n else:\n return make_response(500)",
"def rm_in_bucket(s3, bucket):\n bucket = s3.Bucket(bucket)\n bucket.objects.all().delete()",
"def delete_files(bucket_name):\n s3 = boto3.resource(\"s3\")\n\n bucket = s3.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n # Delete the bucket if we want to \n #bucket.delete()",
"def delete_s3_buckets():\n s3_resource = boto3.resource('s3')\n print('Deleting S3 Buckets')\n for bucket in s3_resource.buckets.all():\n print('Starting object deletion for S3 Bucket {}'.format(bucket.name))\n bucket.object_versions.delete()\n print('Deleting S3 Bucket {}'.format(bucket.name))\n bucket.delete()\n print('S3 Buckets deleted')",
"def delete_file_from_bucket(self):\n self.s3_client.delete_object(Bucket=BUCKET_NAME, Key=FILENAME)\n print(\"File %s deleted from Bucket: %s\" % (FILENAME, BUCKET_NAME))",
"def delete_file(key):\n try:\n s3_bucket.Object(key).delete()\n except Exception as e:\n print(e)",
"def delete_s3_objects(self, bucketName):\n\n s3ObjectList = AWSSetup._list_s3_objects(bucketName, self._s3Client, self.config)\n self._s3Client.delete_objects(\n Bucket = bucketName,\n Delete = {\n 'Objects' : s3ObjectList\n }\n )\n\n self._s3Client.delete_bucket(\n Bucket = bucketName\n )",
"def delete_file( s3_path ):\n\n return _get_bucket().delete_key(s3_path)",
"def test_delete_object_from_s3(self):\n set_up_directories([settings.SRC_DIR])\n object_downloader = self.configure_uploader([\"7d24b2da347b48fe9e59d8c5d4424235.tar\"])\n object_to_delete = \"7d24b2da347b48fe9e59d8c5d4424235.tar\"\n object_downloader.delete_object_from_s3(object_to_delete)\n files_in_bucket = [bucket_object.key for bucket_object in object_downloader.bucket.objects.all()]\n self.assertNotIn(object_to_delete, files_in_bucket)",
"def bucket_delete():\n test_connection = sql_connection()\n if test_connection[0] == 130:\n return redirect(url_for('error', error_str=test_connection[1], error_code=test_connection[0]))\n\n bucket_code = request.form['code']\n check = extract_info(functions.s_buc_table, functions.s_buc_code, bucket_code)\n if check[0] == 228:\n bucket_id = check[1][0][functions.s_buc_id]\n try:\n del_bucket(bucket_id)\n try:\n return render_template('delete.html')\n except:\n return redirect(url_for('error', error_str=sys.exc_info()[1], error_code=render_issue))\n except:\n return redirect(url_for('error', error_str=sys.exc_info()[1], error_code=delete_issue))\n return redirect(url_for('error', error_str=check[1], error_code=check[0]))",
"def empty_bucket(self, name):\n if self.already_exists(name):\n print('Deleting all objects of ' + name)\n bucket = self.s3r.Bucket(name)\n bucket.objects.all().delete()\n # bucket.delete() # only empty bucket",
"def test_delete_empty_bucket(make_stubber, make_unique_name, make_bucket):\n stubber = make_stubber(bucket_wrapper, 'get_s3')\n bucket_name = make_unique_name('bucket')\n\n bucket = make_bucket(stubber, bucket_wrapper, bucket_name, stubber.region_name)\n\n stubber.stub_delete_bucket(bucket_name)\n stubber.stub_head_bucket(bucket_name, 404)\n\n bucket_wrapper.delete_bucket(bucket)",
"def deleteS3files(self):\n s3 = boto3.resource('s3',\n aws_access_key_id=self.s3_key,\n aws_secret_access_key=self.s3_secret)\n bucket = s3.Bucket(self.s3_bucket)\n bucket_files = [x.key for x in bucket.objects.all()]\n delete_objects = []\n if bucket_files:\n for s3_file in bucket_files:\n delete_objects.append({'Key': s3_file})\n try:\n response = bucket.delete_objects(Delete={ 'Objects': delete_objects} )\n except botocore.exceptions.ClientError as e:\n self.logger.error(e)\n self.logger.error(delete_objects)\n return False",
"def delete_s3_storage_controller(self, request):\n try:\n logging.info(f\"Delete S3 storage from Label Studio project\")\n delete_storage_url = (\n f\"{self.label_studio_config.get('s3_storage')}/{request.storage_id}\"\n )\n status_code = APIInterface.delete(\n route=delete_storage_url, headers=self.header\n )\n if status_code == 204:\n return {\"status\": \"Storage Deleted Successfully\"}\n else:\n raise Exception({\"status\": \"Cannot Delete The Storage\"})\n except Exception as error:\n logging.error(f\"Error in delete_s3_storage_controller: {error}\")\n raise error",
"def delete_manifest(name, folder, bucket, function_log):\n action_log = {\n \"action\": \"delete_manifest\",\n \"info\": {\n \"name\": os.path.join(folder, name),\n \"bucket\": bucket\n },\n \"result\": None\n }\n\n try:\n s3.delete_object(Bucket=bucket, Key=os.path.join(folder, name))\n action_log['result'] = \"Success\"\n except ClientError as e:\n action_log['result'] = e.response['Error']['Code']\n \n log_action(function_log, action_log)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
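Taken together, the four Lambda/S3 rows above form a small deployment flow. A hedged orchestration sketch that only calls the helpers defined in those rows; it presumes ./build/lambda.zip already exists, since the build step itself appears only among the negatives:

if __name__ == '__main__':
    create_bucket()    # defined in the rows above
    upload_lambda()    # expects ./build/lambda.zip to be present
    update_lambda()    # publishes the new version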
y = getitem(x, sl) | def pb___getitem__(cls, ybar, x, sl, y, out = None):
if out == None:
raise NotImplementedError('I\'m not sure that this makes sense')
# workaround for qr and eigh
if isinstance( out[0], tuple):
tmp = list(out[0])
tmp[sl] += ybar
# usual workflow
else:
# print 'out=\n', out[0][sl]
# print 'ybar=\n',ybar
out[0][sl] = ybar
return out | [
"def get(self, x, y):\n return self.data[(y * self.sx) + x]",
"def __getitem__(self,j):\r\n return self._coords[j]",
"def get_item(self, identifier):",
"def __getitem__(self, item):\n if isinstance(item, tuple):\n if len(item) != 3:\n raise KeyError(f\"Tuple item must be of length 3, got {len(item)}\")\n if all(isinstance(i, Integer) for i in item):\n x, y, z = tuple(\n self._relative_to_absolute(axis, item[axis]) for axis in range(3)\n )\n\n sy, dy = self._section_index(y)\n if sy in self:\n return int(self._sections[sy][(x, dy, z)])\n else:\n return self.default_value\n\n elif all(isinstance(i, (int, numpy.integer, slice)) for i in item):\n item: Tuple[\n Tuple[int, int, int],\n Tuple[int, int, int],\n Tuple[int, int, int],\n ] = zip(*self._stack_slices(item))\n\n return BoundedPartial3DArray.from_partial_array(\n self._parent_array, *item\n )\n else:\n raise KeyError(f\"Unsupported tuple {item} for getitem\")\n\n elif isinstance(item, (numpy.ndarray, BoundedPartial3DArray)):\n if item.dtype == bool:\n if item.shape != self.shape:\n raise ValueError(\n f\"The shape of the index ({self.shape}) and the shape of the given array ({item.shape}) do not match.\"\n )\n out = []\n for slices_x, relative_slices_x in zip(\n range(self.start_x, self.stop_x, self.step_x), range(0, self.size_x)\n ):\n for (\n sy,\n (_, slices_y, slices_z),\n (_, relative_slices_y, relative_slices_z),\n ) in self._iter_slices(self.slices_tuple):\n if sy in self._sections:\n out.append(\n self._sections[sy][slices_x, slices_y, slices_z][\n numpy.asarray(\n item[\n relative_slices_x,\n relative_slices_y,\n relative_slices_z,\n ]\n )\n ]\n )\n else:\n out.append(\n numpy.full(\n numpy.count_nonzero(\n numpy.asarray(\n item[\n relative_slices_x,\n relative_slices_y,\n relative_slices_z,\n ]\n )\n ),\n self.default_value,\n self.dtype,\n )\n )\n if out:\n return numpy.concatenate(out)\n else:\n return numpy.full(0, self.default_value, self.dtype)\n elif numpy.issubdtype(item.dtype, numpy.integer):\n if isinstance(item, BoundedPartial3DArray):\n raise ValueError(\n \"Index array with a BoundedPartial3DArray is not valid\"\n )\n raise NotImplementedError(\n \"Index arrays are not currently supported\"\n ) # TODO\n else:\n raise ValueError(\n f\"{item.__class__.__name__}({item}) is not a supported input for __getitem__\"\n )\n else:\n raise KeyError(\n f\"{item.__class__.__name__}({item}) is not a supported input for __getitem__\"\n )",
"def get(self, x, y):\n return self.board[y][x] # x and y coords need to be switched to be correct",
"def __getitem__(self, given):\n return self.dataset[given]",
"def __getitem__(self, addr):\r\n return self.r[addr]",
"def __getitem__(self, item):\n t = self.get_table(item[0])\n return t[item[1]]",
"def getvalue(arr,position): \n return arr[position[0],position[1],position[2]]",
"def value(self,x,xlist,ylist):\r\n\tdef bin(x,xlist):\r\n\t \"\"\" returns the bin index in which boundaries the value of x lies in the xlist\r\n\t \"\"\"\r\n\t x = float(x)\r\n\t if (x<=xlist[0]): return 0,0\r\n\t if (x>=xlist[-1]): return self.size-1,self.size-1 \r\n\t for i in range(self.size):\r\n\t\tif x < xlist[i]:\r\n\t\t return max(0,i-1),min(self.size-1,i)\r\n\t#print x\r\n\tx = float(x)\r\n\t#print x\r\n\tww = bin(x,xlist)\r\n\t#print ww\r\n\tif not \"__len__\" in dir(ww):\r\n\t\tprint \"Crazy, \" ,x, xlist[0], xlist[-1]\r\n\r\n\ti,j = ww\r\n\tx0 = xlist[i]\r\n\ty0 = ylist[i] \r\n\tdx = xlist[j]-x0\r\n\tdy = ylist[j]-y0\r\n\tdydx = 0.\r\n\tif (i != j): dydx = dy/dx # ???????\r\n\ty = y0+dydx*(x-x0)\r\n\treturn y",
"def __getitem__(self, item):\n if isinstance(item, (int, np.integer)):\n item = (item,) # though the branches might differ...\n elif isinstance(item, slice):\n item = (item,)\n if any(not isinstance(i, (int, np.integer)) for i in item):\n return self.derivative_tensor(len(item), item)\n else:\n d = self.compute_derivatives(len(item), item, lazy=False)\n return d[0]",
"def getitem(self, item):\r\n for rngsets in self._rangesets:\r\n # rngsets is a _LinkedList of (RangeSet, value) tuples\r\n for rngset, value in rngsets:\r\n try:\r\n rng = rngset.getrange(item)\r\n return self._values[value], rngset, rng, value\r\n except IndexError:\r\n # try RangeSets of the same type, corresponding to other values\r\n continue\r\n except TypeError:\r\n # try RangeSets of a different type\r\n break\r\n raise KeyError(f\"'{item}' was not found in any range\")",
"def findItemId(self, x, y):\n for itemId in self.items:\n coords = self.coords(itemId)\n if self.containsPoint(coords, x, y):\n return itemId\n return None",
"def __getitem__(self, item) -> HuntPilot:\n return self.by_hunt_pilot[item][0]",
"def getitem(src, start, nitems, dest):\n src = ffi.from_buffer(src)\n dest = ffi.from_buffer(dest)\n return C.blosc_getitem(src, start, nitems, dest)",
"def __getitem__(self, item):\n return self.search(item)",
"def __getitem__(self, i):\n return self._data[i]",
"def __getitem__(self, position):\n assert position in Position\n return [self.nord,self.sud,self.est,self.ouest][position]",
"def __getitem__(self, *args):\n return _coin.SoPickedPointList___getitem__(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
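For the pb___getitem__ row above, a conceptual numpy-only sketch of the slice pullback (this is not the algopy/UTPM API itself): for y = x[sl], the reverse sweep routes the adjoint of y into the sl-positions of the adjoint of x. The accumulation is written with += here, whereas the row's code assigns into out[0][sl] directly, which presumes that slot starts out empty.

import numpy as np

x = np.arange(6.0)
sl = slice(1, 4)
y = x[sl]                   # forward: y = getitem(x, sl)
ybar = np.ones_like(y)      # adjoint arriving at y
xbar = np.zeros_like(x)
xbar[sl] += ybar            # reverse: scatter ybar back into the sliced positions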
tries to convert a container (e.g. list or numpy.array) with UTPM elements as instances to a UTPM instance | def as_utpm(cls, x):
x_shp = numpy.shape(x)
xr = numpy.ravel(x)
D,P = xr[0].data.shape[:2]
shp = xr[0].data.shape[2:]
if not isinstance(shp, tuple): shp = (shp,)
if not isinstance(x_shp, tuple): x_shp = (x_shp,)
y = UTPM(numpy.zeros((D,P) + x_shp + shp))
yr = UTPM( y.data.reshape((D,P) + (numpy.prod(x_shp),) + shp))
# print yr.shape
# print yr.data.shape
for n in range(len(xr)):
# print yr[n].shape
# print xr[n].shape
yr[n] = xr[n]
return y | [
"def _convert_seq(sequence, new_class):\n return [new_class(obj) for obj in sequence]",
"def _element_to_serializable(obj: Any) -> Any:\n if isinstance(obj, bytes):\n obj = obj.decode('utf-8')\n\n elif isinstance(obj, np.generic):\n obj = obj.item()\n\n return obj",
"def _upcast_err(err):\n\n if (\n # make sure it is not a scalar\n np.iterable(err) and\n # and it is not empty\n len(err) > 0 and\n # and the first element is an array sub-class use\n # safe_first_element because getitem is index-first not\n # location first on pandas objects so err[0] almost always\n # fails.\n isinstance(cbook._safe_first_finite(err), np.ndarray)\n ):\n # Get the type of the first element\n atype = type(cbook._safe_first_finite(err))\n # Promote the outer container to match the inner container\n if atype is np.ndarray:\n # Converts using np.asarray, because data cannot\n # be directly passed to init of np.ndarray\n return np.asarray(err, dtype=object)\n # If atype is not np.ndarray, directly pass data to init.\n # This works for types such as unyts and astropy units\n return atype(err)\n # Otherwise wrap it in an object array\n return np.asarray(err, dtype=object)",
"def itkImageUL2_cast(obj: 'itkLightObject') -> \"itkImageUL2 *\":\n return _itkImagePython.itkImageUL2_cast(obj)",
"def _get_elements_raw(self, num_elements):\n from comtypes.automation import VARIANT\n # XXX Not sure this is true:\n # For VT_UNKNOWN and VT_DISPATCH, we should retrieve the\n # interface iid by SafeArrayGetIID().\n ptr = POINTER(self._itemtype_)() # container for the values\n _safearray.SafeArrayAccessData(self, byref(ptr))\n try:\n if self._itemtype_ == VARIANT:\n # We have to loop over each item, so we get no\n # speedup by creating an ndarray here.\n return [i.value for i in ptr[:num_elements]]\n elif issubclass(self._itemtype_, POINTER(IUnknown)):\n iid = _safearray.SafeArrayGetIID(self)\n itf = com_interface_registry[str(iid)]\n # COM interface pointers retrieved from array\n # must be AddRef()'d if non-NULL.\n elems = ptr[:num_elements]\n result = []\n # We have to loop over each item, so we get no\n # speedup by creating an ndarray here.\n for p in elems:\n if bool(p):\n p.AddRef()\n result.append(p.QueryInterface(itf))\n else:\n # return a NULL-interface pointer.\n result.append(POINTER(itf)())\n return result\n else:\n # If the safearray element are NOT native python\n # objects, the containing safearray must be kept\n # alive until all the elements are destroyed.\n if not issubclass(self._itemtype_, Structure):\n # Create an ndarray if requested. This is where\n # we can get the most speed-up.\n # XXX Only try to convert types known to\n # numpy.ctypeslib.\n if (safearray_as_ndarray and self._itemtype_ in\n list(npsupport.typecodes.keys())):\n arr = numpy.ctypeslib.as_array(ptr,\n (num_elements,))\n return arr.copy()\n return ptr[:num_elements]\n\n def keep_safearray(v):\n v.__keepref = self\n return v\n return [keep_safearray(x) for x in ptr[:num_elements]]\n finally:\n _safearray.SafeArrayUnaccessData(self)",
"def cast(obj: 'itkLightObject') -> \"itkImageUL2 *\":\n return _itkImagePython.itkImageUL2_cast(obj)",
"def uval_array_frompointer(*args) -> \"uval_array *\":\n return _ida_pro.uval_array_frompointer(*args)",
"def itkImageUC2_cast(obj: 'itkLightObject') -> \"itkImageUC2 *\":\n return _itkImagePython.itkImageUC2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkImageUC2 *\":\n return _itkImagePython.itkImageUC2_cast(obj)",
"def npu(self) -> 'BaseDataElement':\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, (torch.Tensor, BaseDataElement)):\n v = v.npu()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data",
"def cast(cls, *args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0:\n elem = args[0]\n if elem is None:\n return None\n if isinstance(elem, CuGraphTensorAttr):\n return elem\n if isinstance(elem, (tuple, list)):\n return cls(*elem)\n if isinstance(elem, dict):\n return cls(**elem)\n return cls(*args, **kwargs)",
"def list_to_cpu(x_lst):\n for id, data in enumerate(x_lst):\n if isinstance(data, cuda.cupy.ndarray):\n x_lst[id] = chainer.cuda.to_cpu(data)\n elif isinstance(data, dict):\n x_lst[id] = dict_to_cpu(data)\n elif isinstance(data, list):\n x_lst[id] = list_to_cpu(data)\n else:\n x_lst[id] = data\n return x_lst",
"def itkImageUC3_cast(obj: 'itkLightObject') -> \"itkImageUC3 *\":\n return _itkImagePython.itkImageUC3_cast(obj)",
"def __from_arrow__(self, data):\n return self.construct_array_type()(data)",
"def itkImageUS3_cast(obj: 'itkLightObject') -> \"itkImageUS3 *\":\n return _itkImagePython.itkImageUS3_cast(obj)",
"def itkImageUL3_cast(obj: 'itkLightObject') -> \"itkImageUL3 *\":\n return _itkImagePython.itkImageUL3_cast(obj)",
"def _formulate(self, objects) -> list[T]:\n return objects",
"def test_it_should_cast_the_internal_iterator_to_the_provided_container_type(self):\n from enumerable.iterators import Enumerable\n\n test_data = [1, 2, 3, 4, 5]\n results = Enumerable(test_data).to(list)\n expect(results).to(equal(test_data))",
"def cast(obj: 'itkLightObject') -> \"itkImageUL3 *\":\n return _itkImagePython.itkImageUL3_cast(obj)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
operation to extract the UTP coefficients of x defined by the slice sl; creates a new UTPM instance where the coefficients have the shape defined by shp | def coeff_op(self, sl, shp):
tmp = self.data.__getitem__(sl)
tmp = tmp.reshape(shp)
return self.__class__(tmp) | [
"def as_utpm(cls, x):\n\n x_shp = numpy.shape(x)\n xr = numpy.ravel(x)\n D,P = xr[0].data.shape[:2]\n shp = xr[0].data.shape[2:]\n\n if not isinstance(shp, tuple): shp = (shp,)\n if not isinstance(x_shp, tuple): x_shp = (x_shp,)\n\n y = UTPM(numpy.zeros((D,P) + x_shp + shp))\n\n yr = UTPM( y.data.reshape((D,P) + (numpy.prod(x_shp),) + shp))\n\n # print yr.shape\n # print yr.data.shape\n\n for n in range(len(xr)):\n # print yr[n].shape\n # print xr[n].shape\n yr[n] = xr[n]\n\n return y",
"def thrust(s, obj):\n #return vector(0.0, 0.0, 0.0)\n return obj.n.scale(ft2WU(2000))",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def basis(u, s, p = 0.5):\n sum_lambda=np.sum(s)\n sum_chosen=0\n num=0\n for var in s:\n sum_chosen+=var\n num+=1\n if sum_chosen>=sum_lambda*p:\n break\n u=u[:,0:num].copy()\n return u",
"def csg_torus(int slices, segments, LandFloat diameter,\n void *shared) -> LandCSG *:\n LandArray *polygons = land_array_new()\n for int i in range(slices): # latitude\n for int j in range(segments): # \"longitude\"\n LandArray *vertices = land_array_new()\n torus_point(vertices, 1.0 * i / slices, 1.0 * j / segments,\n diameter / 2)\n torus_point(vertices, 1.0 * (i + 1) / slices,\n 1.0 * j / segments, diameter / 2)\n torus_point(vertices, 1.0 * (i + 1) / slices,\n 1.0 * (j + 1) / segments, diameter / 2)\n torus_point(vertices, 1.0 * i / slices,\n 1.0 * (j + 1) / segments, diameter / 2)\n land_array_add(polygons, land_csg_polygon_new(vertices, shared))\n return land_csg_new_from_polygons(polygons)",
"def localVelTri(Vx,Vy,Vz,tp,sweep,dih):\n \n Vxz = Vx * np.cos(sweep) - Vy * np.sin(sweep);\n Vyz = Vx * np.sin(sweep) + Vy * np.cos(sweep);\n Vzz = Vz;\n \n Vxx = Vxz * np.cos(tp) - Vzz * np.sin(tp);\n Vyx = Vyz;\n Vzx = Vxz * np.sin(tp) + Vzz * np.cos(tp);\n \n Vxl = Vxx;\n Vyl = Vyx * np.cos(dih) + Vzx * np.sin(dih);\n Vzl = - Vyx * np.sin(dih) + Vzx * np.cos(dih);\n return Vxl,Vyl,Vzl;",
"def sliced(self,*args):\n if len(args)==1 and type(args[0])==slice: s=args[0]\n else: s=slice(*args)\n ps = self.apply_func(lambda _, spec: spec[s], lambda _, cov: cov[s,s])\n ps.ells = self.ells[s]\n return ps",
"def update_precond_splu(L12, l3, U12, u3, dxs, dgs, step=0.01):\n # make sure that L and U have similar dynamic range\n max_l = max(np.max(np.abs(L12)), np.max(l3))\n max_u = max(np.max(np.abs(U12)), np.max(u3))\n rho = np.sqrt(max_l/max_u)\n L12 = L12/rho\n l3 = l3/rho\n U12 = rho*U12\n u3 = rho*u3\n # extract blocks\n r = U12.shape[0]\n L1 = L12[:r]\n L2 = L12[r:]\n U1 = U12[:, :r]\n U2 = U12[:, r:]\n \n dx = np.concatenate([np.reshape(x, [-1, 1]) for x in dxs], 0) # a tall column vector\n dg = np.concatenate([np.reshape(g, [-1, 1]) for g in dgs], 0) # a tall column vector\n \n # U*dg\n Ug1 = np.dot(U1, dg[:r]) + np.dot(U2, dg[r:])\n Ug2 = u3*dg[r:]\n # Q*dg\n Qg1 = np.dot(L1, Ug1)\n Qg2 = np.dot(L2, Ug1) + l3*Ug2\n # inv(U^T)*dx\n iUtx1 = linalg.solve_triangular(np.transpose(U1), dx[:r], lower=True)\n iUtx2 = (dx[r:] - np.dot(np.transpose(U2), iUtx1))/u3\n # inv(Q^T)*dx\n iQtx2 = iUtx2/l3\n iQtx1 = linalg.solve_triangular(np.transpose(L1), \n iUtx1 - np.dot(np.transpose(L2), iQtx2), lower=False)\n # L^T*Q*dg\n LtQg1 = np.dot(np.transpose(L1), Qg1) + np.dot(np.transpose(L2), Qg2)\n LtQg2 = l3*Qg2\n # P*dg\n Pg1 = np.dot(np.transpose(U1), LtQg1)\n Pg2 = np.dot(np.transpose(U2), LtQg1) + u3*LtQg2\n # inv(L)*inv(Q^T)*dx\n iLiQtx1 = linalg.solve_triangular(L1, iQtx1, lower=True)\n iLiQtx2 = (iQtx2 - np.dot(L2, iLiQtx1))/l3\n # inv(P)*dx\n iPx2 = iLiQtx2/u3\n iPx1 = linalg.solve_triangular(U1, iLiQtx1 - np.dot(U2, iPx2), lower=False)\n \n # update L\n grad1 = np.dot(Qg1, np.transpose(Qg1)) - np.dot(iQtx1, np.transpose(iQtx1))\n grad1 = np.tril(grad1)\n grad2 = np.dot(Qg2, np.transpose(Qg1)) - np.dot(iQtx2, np.transpose(iQtx1))\n grad3 = Qg2*Qg2 - iQtx2*iQtx2\n max_abs_grad = np.max(np.abs(grad1))\n max_abs_grad = max(max_abs_grad, np.max(np.abs(grad2)))\n max_abs_grad = max(max_abs_grad, np.max(np.abs(grad3)))\n step0 = step/(max_abs_grad + _tiny)\n newL1 = L1 - np.dot(step0*grad1, L1)\n newL2 = L2 - np.dot(step0*grad2, L1) - step0*grad3*L2\n newl3 = l3 - step0*grad3*l3\n\n # update U\n grad1 = np.dot(Pg1, np.transpose(dg[:r])) - np.dot(dx[:r], np.transpose(iPx1))\n grad1 = np.triu(grad1)\n grad2 = np.dot(Pg1, np.transpose(dg[r:])) - np.dot(dx[:r], np.transpose(iPx2))\n grad3 = Pg2*dg[r:] - dx[r:]*iPx2\n max_abs_grad = np.max(np.abs(grad1))\n max_abs_grad = max(max_abs_grad, np.max(np.abs(grad2)))\n max_abs_grad = max(max_abs_grad, np.max(np.abs(grad3)))\n step0 = step/(max_abs_grad + _tiny)\n newU1 = U1 - np.dot(U1, step0*grad1)\n newU2 = U2 - np.dot(U1, step0*grad2) - step0*np.transpose(grad3)*U2\n newu3 = u3 - step0*grad3*u3\n\n return np.concatenate([newL1, newL2], axis=0), newl3, np.concatenate([newU1, newU2], axis=1), newu3",
"def get_lu_row_pivoting(Ao, number_of_subsamples):\n A = Ao.copy()\n P = lu(A)[0]\n z = np.where(P==1)[1][:number_of_subsamples]\n return z",
"def unpak(self, p):\n\n errstring = self.consist('gmm')\n if errstring != None:\n raise Exception(errstring)\n if self.nwts != len(p):\n raise Exception('Invalid weight vector length')\n\n mark1 = self.ncentres\n mark2 = mark1 + self.ncentres*self.nin\n\n self.priors = p[0:mark1]\n self.centres = p[mark1:mark2].reshape(self.ncentres, self.nin, order='F')\n if self.covar_type == 'spherical':\n mark3 = self.ncentres*(2 + self.nin)\n self.covars = p[mark2:mark3].reshape(1, self.ncentres, order='F')\n elif self.covar_type == 'diag':\n mark3 = self.ncentres*(1 + self.nin + self.nin)\n self.covars = p[mark2:mark3].reshape(self.ncentres, self.nin, order='F')\n elif self.covar_type == 'full':\n mark3 = self.ncentres*(1 + self.nin + self.nin*self.nin)\n self.covars = p[mark2:mark3].reshape(self.nin, self.nin, self.ncentres, order='F')\n elif self.covar_type == 'ppca':\n mark3 = self.ncentres*(2 + self.nin)\n self.covars = p[mark2:mark3]\n # Now also extract k and eigenspaces\n mark4 = mark3 + self.ncentres*self.ppca_dim\n self.lambd = p[mark3:mark4].reshape(self.ncentres, self.ppca_dim, order='F')\n self.U = p[mark4 + 1:-1].reshape(self.nin, self.ppca_dim, self.ncentres, order='F')\n else:\n raise Exception('Unknown covariance type ' + self.covar_type)",
"def reshape_coeffs(lin_op):\r\n new_coeffs = []\r\n coeffs = get_coefficients(lin_op.args[0])\r\n for id_, size, block in coeffs:\r\n new_coeffs.append((id_, lin_op.size, block))\r\n\r\n return new_coeffs",
"def He3_cross(v,P,T,L): \n PdT=P/T\n return PdT*L*8606.3/v",
"def _subsample_vector_field(theta, strength, step=None):\n\tif np.array(theta.shape != strength.shape).any():\n\t\traise RuntimeError(\"Theta {} and strength {} must have the same shape\".format(theta.shape, strength.shape))\n\trows, cols = theta.shape\n\tx, y = np.meshgrid(np.arange(cols), np.arange(rows))\n\tif step: S = step\n\telse: S = 6\n\tx = x[::S,::S]\n\ty = y[::S,::S]\n\ttheta = theta[::S,::S]\n\tstrength = strength[::S,::S]\n\treturn x, y, theta, strength",
"def polyCopyUV(selectionList, uvSetName=\"string\", createNewMap=bool, uvSetNameInput=\"string\", nodeState=int, constructionHistory=bool, caching=bool, name=\"string\"):\n pass",
"def maccormack(U_init,numt,numx,numy,delx,dely,Tw,Tfs,rho_fs,ufs,c_v,c_p,viscfs,Prt,lmbda,R,gamma):\n Un = numpy.zeros((numt+1,4,numx,numy))\n Un[0,:,:,:] = U_init.copy()\n #\n U = U_init.copy()\n #\n Us = U_init.copy()\n #\n for t in range(1,numt+1):\n \t#get properties to calculate fluxes:\n \tT = get_Temperature(U, numx, numy, Tw, Tfs, c_v)\n \tmu = get_visc(T, viscfs, Tfs)\n \tk = get_k(mu, c_p, Prt)\n \t#get shear:\n \tt_xyE = get_tau_xy_Epredict(U, mu, numx, numy, delx, dely )\n \tt_xyF = get_tau_xy_Fpredict(U, mu, numx, numy, delx, dely )\n \tt_xx = get_tau_xx_Epredict(U, mu, numx, numy, delx, dely, lmbda)\n \tt_yy = get_tau_yy_Fpredict(U, mu, numx, numy, delx, dely, lmbda)\n \t#calculate fluxes E, F:\n \tE = get_E_flux_predictor(U, numx, numy, delx, mu, T, k, t_xx, t_xyE, R)\n \tF = get_F_flux_predictor(U, numx, numy, dely, mu, T, k, t_xyF, t_yy, R)\n \t#dt:\n \tdt = get_dt(U, numx, numy, delx, dely, mu, T, gamma, R, Prt)\n \t#Predictor Step:\n \tUs[:,1:-1,1:-1] = U[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E[:,2:,1:-1] - E[:,1:-1,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F[:,1:-1,2:] - F[:,1:-1,1:-1])\n \tUstar = get_BC(Us, T, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \t#update properties:\n \tT2 = get_Temperature(Ustar, numx, numy, Tw, Tfs, c_v)\n \tmu2 = get_visc(T2, viscfs, Tfs)\n \tk2 = get_k(mu2, c_p, Prt)\n \t#update shear:\n \tt_xyE2 = get_tau_xy_Ecorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xyF2 = get_tau_xy_Fcorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xx2 = get_tau_xx_Ecorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \tt_yy2 = get_tau_yy_Fcorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \t#update fluxes:\n \tE2 = get_E_flux_correct(Ustar, numx, numy, delx, mu2, T2, k2, t_xx2, t_xyE2, R)\n \tF2 = get_F_flux_correct(Ustar, numx, numy, dely, mu2, T2, k2, t_xyF2, t_yy2, R)\n \t#corrector step:\n \tUn[t,:,1:-1,1:-1] = 0.5*( U[:,1:-1,1:-1] + Ustar[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E2[:,1:-1,1:-1]-E2[:,:-2,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F2[:,1:-1,1:-1]-F2[:,1:-1,:-2] ))\n \t#\n \tUn[t,:,:,:] = get_BC(Un[t,:,:,:], T2, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \tU = Un[t,:,:,:].copy()\n \t#print(t)\n \tif( numpy.all(numpy.abs(Un[t,0,:,:]-Un[t-1,0,:,:]) < 1e-8) == True ):\n \t\ttt=t+1\n \t\tUn = Un[:tt,:,:,:].copy()\n \t\tmscn = (numpy.trapz(Un[t,1,0,:])/numpy.trapz(Un[t,1,-1,:]))*100\n \t\tprint('Mass is conserved by %.2f percent' % mscn)\n \t\tbreak\n \n return Un",
"def update_precond_splu(L12, l3, U12, u3, dxs, dgs, step=0.01):\n # make sure that L and U have similar dynamic range\n max_l = max(torch.max(torch.abs(L12)), torch.max(l3))\n max_u = max(torch.max(torch.abs(U12)), torch.max(u3))\n rho = torch.sqrt(max_l / max_u)\n L12 = L12 / rho\n l3 = l3 / rho\n U12 = rho * U12\n u3 = rho * u3\n # extract blocks\n r = U12.shape[0]\n L1 = L12[:r]\n L2 = L12[r:]\n U1 = U12[:, :r]\n U2 = U12[:, r:]\n\n dx = torch.cat([torch.reshape(x, [-1, 1]) for x in dxs]) # a tall column vector\n dg = torch.cat([torch.reshape(g, [-1, 1]) for g in dgs]) # a tall column vector\n\n # U*dg\n Ug1 = U1.mm(dg[:r]) + U2.mm(dg[r:])\n Ug2 = u3 * dg[r:]\n # Q*dg\n Qg1 = L1.mm(Ug1)\n Qg2 = L2.mm(Ug1) + l3 * Ug2\n # inv(U^T)*dx\n iUtx1 = torch.triangular_solve(dx[:r], U1.t(), upper=False)[0]\n iUtx2 = (dx[r:] - U2.t().mm(iUtx1)) / u3\n # inv(Q^T)*dx\n iQtx2 = iUtx2 / l3\n iQtx1 = torch.triangular_solve(iUtx1 - L2.t().mm(iQtx2), L1.t(), upper=True)[0]\n # L^T*Q*dg\n LtQg1 = L1.t().mm(Qg1) + L2.t().mm(Qg2)\n LtQg2 = l3 * Qg2\n # P*dg\n Pg1 = U1.t().mm(LtQg1)\n Pg2 = U2.t().mm(LtQg1) + u3 * LtQg2\n # inv(L)*inv(Q^T)*dx\n iLiQtx1 = torch.triangular_solve(iQtx1, L1, upper=False)[0]\n iLiQtx2 = (iQtx2 - L2.mm(iLiQtx1)) / l3\n # inv(P)*dx\n iPx2 = iLiQtx2 / u3\n iPx1 = torch.triangular_solve(iLiQtx1 - U2.mm(iPx2), U1, upper=True)[0]\n\n # update L\n grad1 = Qg1.mm(Qg1.t()) - iQtx1.mm(iQtx1.t())\n grad1 = torch.tril(grad1)\n grad2 = Qg2.mm(Qg1.t()) - iQtx2.mm(iQtx1.t())\n grad3 = Qg2 * Qg2 - iQtx2 * iQtx2\n max_abs_grad = torch.max(torch.abs(grad1))\n max_abs_grad = max(max_abs_grad, torch.max(torch.abs(grad2)))\n max_abs_grad = max(max_abs_grad, torch.max(torch.abs(grad3)))\n step0 = step / (max_abs_grad + _tiny)\n newL1 = L1 - step0 * grad1.mm(L1)\n newL2 = L2 - step0 * grad2.mm(L1) - step0 * grad3 * L2\n newl3 = l3 - step0 * grad3 * l3\n\n # update U\n grad1 = Pg1.mm(dg[:r].t()) - dx[:r].mm(iPx1.t())\n grad1 = torch.triu(grad1)\n grad2 = Pg1.mm(dg[r:].t()) - dx[:r].mm(iPx2.t())\n grad3 = Pg2 * dg[r:] - dx[r:] * iPx2\n max_abs_grad = torch.max(torch.abs(grad1))\n max_abs_grad = max(max_abs_grad, torch.max(torch.abs(grad2)))\n max_abs_grad = max(max_abs_grad, torch.max(torch.abs(grad3)))\n step0 = step / (max_abs_grad + _tiny)\n newU1 = U1 - U1.mm(step0 * grad1)\n newU2 = U2 - U1.mm(step0 * grad2) - step0 * grad3.t() * U2\n newu3 = u3 - step0 * grad3 * u3\n\n return torch.cat([newL1, newL2], dim=0), newl3, torch.cat([newU1, newU2], dim=1), newu3",
"def cSpmvh():\n \n R=\"\"\"\n \n KERNEL void pELL_spmvh_mCoil(\n const unsigned int Reps, // number of coils\n const unsigned int nRow, // number of rows\n const unsigned int prodJd, // product of Jd\n const unsigned int sumJd, // sum of Jd\n const unsigned int dim, // dimensionality\n GLOBAL_MEM const unsigned int *Jd, // Jd\n // GLOBAL_MEM const unsigned int *curr_sumJd, // \n GLOBAL_MEM const unsigned int *meshindex, // meshindex, prodJd * dim\n GLOBAL_MEM const unsigned int *kindx, // unmixed column indexes of all dimensions\n GLOBAL_MEM const float2 *udata, // interpolation data before Kronecker product\n GLOBAL_MEM float2 *k, \n //GLOBAL_MEM float2 *res,\n GLOBAL_MEM const float2 *input) // y\n { \n const unsigned int t = get_local_id(0);\n const unsigned int vecWidth=${LL};\n // Thread ID within wavefront\n const unsigned int id = t & (vecWidth-1);\n \n // One row per wavefront\n unsigned int vecsPerBlock=get_local_size(0)/vecWidth;\n unsigned int myRow=(get_group_id(0)*vecsPerBlock) + (t/ vecWidth); // the myRow-th non-Cartesian sample\n unsigned int m = myRow / Reps;\n unsigned int nc = myRow - m * Reps;\n \n float2 zero;\n zero.x = 0.0;\n zero.y = 0.0;\n \n \n if (myRow < nRow * Reps)\n {\n const unsigned int vecStart = 0; \n const unsigned int vecEnd =prodJd; \n float2 u=zero;\n \n for (unsigned int j = vecStart+id; j<vecEnd; j += vecWidth)\n { \n // now doing the first dimension\n unsigned int index_shift = m * sumJd;\n // unsigned int tmp_sumJd = 0;\n unsigned int J = Jd[0];\n unsigned int index = index_shift + meshindex[dim*j + 0];\n unsigned int col = kindx[index] ;\n float2 spdata = udata[index];\n index_shift += J; \n for (unsigned int dimid = 1; dimid < dim; dimid ++ )\n {\n J = Jd[dimid];\n index = index_shift + meshindex[dim*j + dimid]; // the index of the partial ELL arrays *kindx and *udata\n col += kindx[index];// + 1 ; // the column index of the current j\n float tmp_x = spdata.x;\n float2 tmp_udata = udata[index];\n spdata.x = tmp_x * tmp_udata.x - spdata.y * tmp_udata.y; // the spdata of the current j\n spdata.y = tmp_x * tmp_udata.y + spdata.y * tmp_udata.x; \n index_shift += J;\n }; // Iterate over dimensions 1 -> Nd - 1\n \n float2 ydata=input[myRow]; // kout[col];\n u.x = spdata.x*ydata.x + spdata.y*ydata.y;\n u.y = - spdata.y*ydata.x + spdata.x*ydata.y;\n \n atomic_add_float2(k + col*Reps + nc, u);//, res + col*Reps + nc);\n LOCAL_BARRIER;\n // atomic_add_float2(k + col*Reps + nc, u, res + col*Reps + nc);\n }; // Iterate for (unsigned int j = 0; j < prodJd; j ++)\n }; // if (m < nRow)\n \n }; // End of xELL_spmvh_mCoil \n \n \n KERNEL void pELL_spmvh_mCoil_new(\n const unsigned int Reps, // number of coils\n const unsigned int nRow, // number of rows\n const unsigned int prodJd, // product of Jd\n const unsigned int sumJd, // sum of Jd\n const unsigned int dim, // dimensionality\n GLOBAL_MEM const unsigned int *Jd, // Jd\n // GLOBAL_MEM const unsigned int *curr_sumJd, // \n GLOBAL_MEM const unsigned int *meshindex, // meshindex, prodJd * dim\n GLOBAL_MEM const unsigned int *kindx, // unmixed column indexes of all dimensions\n GLOBAL_MEM const float2 *udata, // interpolation data before Kronecker product\n GLOBAL_MEM float2 *k, \n GLOBAL_MEM float2 *res,\n GLOBAL_MEM const float2 *input) // y\n {\n unsigned int myRow0= get_global_id(0);\n unsigned int myRow= myRow0/(float)Reps;\n unsigned int nc = myRow0 - myRow*Reps;\n float2 zero;\n zero.x = 0.0;\n zero.y = 0.0;\n if (myRow < nRow){ \n for (unsigned int j = 0; j < prodJd; j ++){\n float2 u = zero;\n\n // 
now doing the first dimension\n unsigned int index_shift = myRow * sumJd;\n // unsigned int tmp_sumJd = 0;\n unsigned int J = Jd[0];\n unsigned int index = index_shift + meshindex[dim*j + 0];\n unsigned int col = kindx[index] ;\n float2 spdata = udata[index];\n index_shift += J; \n for (unsigned int dimid = 1; dimid < dim; dimid ++ ){\n J = Jd[dimid];\n index = index_shift + meshindex[dim*j + dimid]; // the index of the partial ELL arrays *kindx and *udata\n col += kindx[index];// + 1 ; // the column index of the current j\n float tmp_x = spdata.x;\n float2 tmp_udata = udata[index];\n spdata.x = tmp_x * tmp_udata.x - spdata.y * tmp_udata.y; // the spdata of the current j\n spdata.y = tmp_x * tmp_udata.y + spdata.y * tmp_udata.x; \n index_shift += J;\n }; // Iterate over dimensions 1 -> Nd - 1\n \n float2 ydata=input[myRow*Reps + nc]; // kout[col];\n u.x = spdata.x*ydata.x + spdata.y*ydata.y;\n u.y = - spdata.y*ydata.x + spdata.x*ydata.y;\n atomic_add_float2(k + col*Reps + nc, u);\n \n }; // Iterate for (unsigned int j = 0; j < prodJd; j ++)\n \n }; // if (m < nRow)\n \n }; // End of pELL_spmvh_mCoil \n \"\"\"\n return R",
"def __getslice__(self, *args):\n return _wali.TransVector___getslice__(self, *args)",
"def spline_regression(x, y, num_parts, deg=3, alpha=.01, smoothness=1):\n\n # coefficients of the polynomial of p.\n p = cvxpy.Variable((num_parts, deg + 1), name='p')\n\n # convert to numpy format because it is easier to work with.\n numpy_p = np.array([[p[i, j] for j in range(deg+1)] \\\n for i in range(num_parts)])\n\n regularizer = alpha * cvxpy.norm(p, 1)\n\n num_points_per_part = int(len(x) / num_parts)\n\n smoothness_constraints = []\n\n # cuttoff values\n t = []\n\n fitting_value = 0\n # split the data into equal `num_parts` pieces\n for i in range(num_parts):\n\n # the part of the data that the current piece fits\n sub_x = x[num_points_per_part * i:num_points_per_part * (i + 1)]\n sub_y = y[num_points_per_part * i:num_points_per_part * (i + 1)]\n\n # compute p(sub_x)\n # pow_x = np.array([sub_x**k for k in range(deg + 1)])\n # sub_p = polyval(sub_xnumpy_p[i, :].dot(pow_x)\n sub_p = eval_poly_from_coefficients(numpy_p[i], sub_x)\n\n # fitting value of the current part of p,\n # equal to sqrt(sum |p(x_i) - y_i|^2), where the sum\n # is over data (x_i, y_i) in the current piece.\n fitting_value += cvxpy.norm(cvxpy.vstack(sub_p - sub_y), 1)\n\n # glue things together by ensuring smoothness of the p at x1\n if i > 0:\n x1 = x[num_points_per_part * i]\n # computes the derivatives p'(x1) for the left and from the right of x1\n\n # x_deriv is the 2D matrix k!/(k-j)! x1^(k-j) indexed by (j, k)\n x1_deriv = np.array(\n [[np.prod(range(k - j, k)) * x1**(k - j)\n for k in range(deg + 1)]\n for j in range(smoothness + 1)]).T\n\n p_deriv_left = numpy_p[i - 1].dot(x1_deriv)\n p_deriv_right = numpy_p[i].dot(x1_deriv)\n\n smoothness_constraints += [\n cvxpy.vstack(p_deriv_left - p_deriv_right) == 0\n ]\n t.append(x1)\n min_loss = cvxpy.Minimize(fitting_value + regularizer)\n prob = cvxpy.Problem(min_loss, smoothness_constraints)\n prob.solve(verbose=False)\n\n return _piecewise_polynomial_as_function(p.value, t)",
"def getRSStri(t, u, model, h, K=0):\n \n # remember, unlike Fortran, indices here will start from 0. \n # So remember use begin_idx as one less than what we were using in Fortran.\n # Basically, recresid will get filled from idx=ncols to idx=Sfinal for linear regression.\n # Fortran wud have filled it from idx = ncols+1 to Sfinal.\n\n # build RSS matrix\n if (model == 'linear'):\n ncols = 2\n elif (model == 'harmonic'):\n ncols = 2*K+1\n\n Sfinal = len(t)\n RSStri = [[0 for i in range(Sfinal)] for j in range(Sfinal)]\n brkpt_spacing = int(np.floor(Sfinal * h))\n if brkpt_spacing <= ncols:\n print (\"minimum segment size must be greater than the number of regressors; resetting\")\n brkpt_spacing = ncols + 2 #this number 2 is a random choice\n \n for idx in range(Sfinal- brkpt_spacing +1):\n if (model == 'linear'):\n tmp = recresids(t[idx:], u[idx:], ncols, 'linear', 1)\n elif (model == 'harmonic'):\n tmp = recresids(t[idx:], u[idx:], ncols, 'harmon', K) \n else:\n print (\"model not supported\")\n tmp2 = [i*i for i in tmp]\n RSStri[idx][idx:] = np.cumsum(tmp2)\n \n return RSStri"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
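For the coeff_op row above, a minimal usage sketch, assuming the class shown is AlgoPy's UTPM (the row only contains the method body, so the import path, data shape, and slice below are illustrative assumptions):

    # Sketch only: assumes `from algopy import UTPM` provides the class whose coeff_op appears above.
    import numpy
    from algopy import UTPM

    # UTPM coefficient data is laid out as (D, P) + value_shape:
    # here D=3 Taylor coefficients, P=1 direction, and a 2x2 matrix value.
    x = UTPM(numpy.random.random((3, 1, 2, 2)))

    # coeff_op returns self.__class__(self.data[sl].reshape(shp));
    # here we keep one slice of the matrix value and request shape (3, 1, 2).
    y = x.coeff_op((slice(None), slice(None), slice(None), 0), (3, 1, 2))
    print(y.data.shape)  # (3, 1, 2)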
computes y = hyp1f1(a, b, x) in UTP arithmetic | def hyp1f1(cls, a, b, x):
retval = x.clone()
cls._hyp1f1(a, b, x.data, out = retval.data)
return retval | [
"def hyp1f1(a, b, z):\n #global mp\n uselib = lib is not None and not use_mpmath\n #if not uselib and mp is None:\n # mp = __import__(\"mpmath\")\n\n p = PrmsAndInfo(c_int(max_iter), c_double(tol), c_int(0), c_double(0), c_int(0))\n if (np.ndim(a) + np.ndim(b) + np.ndim(z) > 1):\n l = [len(x) for x in (a, b, z) if hasattr(x, \"__len__\")]\n if l[1:] != l[:-1]:\n raise TypeError(\"if more than one parameter is a numpy array, they have to have the same length\")\n a, b, z = [np.ones(l[0])*x if not hasattr(x, \"__len__\") else x for x in (a, b, z)]\n if uselib:\n out = np.zeros(l[0], dtype=np.complex128)\n lib.hyp1f1_all_arr(a.astype(np.complex128), b.astype(np.complex128), z.astype(np.complex128), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(aa, bb, zz) for aa, bb, zz in zip(a, b, z)], dtype=np.complex128)\n return out\n if (np.ndim(a) == 1):\n if uselib:\n out = np.zeros(len(a), dtype=np.complex128)\n lib.hyp1f1_a_arr(a.astype(np.complex128), cmpl(b), cmpl(z), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(aa, b, z) for aa in a], dtype=np.complex128)\n return out\n elif (np.ndim(b) == 1):\n if uselib:\n out = np.zeros(len(b), dtype=np.complex128)\n lib.hyp1f1_b_arr(cmpl(a), b.astype(np.complex128), cmpl(z), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(a, bb, z) for bb in b], dtype=np.complex128)\n return out\n elif (np.ndim(z) == 1):\n if uselib:\n out = np.zeros(len(z), dtype=np.complex128)\n lib.hyp1f1_z_arr(cmpl(a), cmpl(b), z.astype(np.complex128), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(a, b, zz) for zz in z], dtype=np.complex128)\n return out\n else: \n if uselib:\n c = lib.hyp1f1(cmpl(a), cmpl(b), cmpl(z), byref(p))\n out = c.re + 1j* c.im\n if not nofallback and p.prec_warning or not uselib:\n out = np.complex128(mp.hyp1f1(a, b, z))\n return out",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def hypothenuse(a, b):\n return sqrt(a*a + b*b)",
"def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):\n thr_fun = lambda X: (X < -threshold) * -a + (X > threshold) * a + ((X < -threshold) + (X > threshold) == 0) * X\n fun = lambda X: thr_fun(a * scipy.tanh(X * b))\n # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2\n ab = a * b\n der = lambda X: ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) + scipy.ones(X.shape) * prime_offset\n inv = lambda X: scipy.arctanh(X / a) / b\n descr = \"hyperbolic_tangent(%f, %f, %f, %f)\" % (a, b, prime_offset, threshold)\n return ActivationFunction(fun, inv, der, descr)",
"def dtanh(tanh_x):\n return 1 - tanh_x**2",
"def eval_hypothesis_function(w, x):\n # print(\"w shape:\",w.T.shape)\n # print(\"x shape:\",x.T.shape)\n z= np.dot(w.T, x.T)\n #print(z.shape)\n return 1. / (1 + np.exp(-z))",
"def HamSaddle1D_Hamiltonian(t, u, PARAMETERS = [1]):\n x, y = u.T\n # Hamiltonian Model Parameter\n lamda, = PARAMETERS\n return 0.5*lamda*(y*y - x*x)",
"def test_hyperbolic_function(self):\n reg_args = Namespace(regularization_x_trans=-1.0,\n regularization_y_trans=-2.0,\n regularization_slope=2,\n regularization_method=regularization.Regularization.HYPERBOLIC)\n self.assertEqual(\"Hyperbolic regularization: y = 2 (0.5 (x + 1.0) + (0.25 + 0.25 (x + 1.0)^{2})^{0.5}) - 2.0\",\n regularization.hyperbolic_function_string(reg_args))\n\n x_t = lambda t: t\n y_t = lambda t: 0\n reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)\n self.assertAlmostEqual(2.236067977, reg_x_t(1))\n self.assertAlmostEqual(0, reg_y_t(1))\n\n x_t = lambda t: t * 3 / 5\n y_t = lambda t: t * 4 / 5\n reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)\n self.assertAlmostEqual(2.236067977 * 3 / 5, reg_x_t(1))\n self.assertAlmostEqual(2.236067977 * 4 / 5, reg_y_t(1))",
"def phi(x1, x2):\n return np.array([x1, x2, x1**2.0 + x2**2.0])",
"def HamSN1D_Hamiltonian(t, u):\n x, y = u.T\n return 0.5*y*y + x**3/3 + 0.5*x*x",
"def Duffing1D_Hamiltonian(t, u, PARAMETERS = [1, 1]):\n x, p_x = u.T\n alpha, beta = PARAMETERS\n return 0.5*(p_x**2 - alpha*x**2 + 0.5*beta*x**4)",
"def calculate_t1_point(values, alphas, tr, clamp=None, threshold=None):\n if threshold is not None:\n if np.sum(values) < len(values)*threshold:\n return 0\n a = np.argsort(alphas)\n x = values[a]/np.sin(alphas[a])\n y = values[a]/np.tan(alphas[a])\n\n weight = 1.1\n A = np.vstack([x, np.ones(len(x))]).T\n W = np.sqrt(np.diag(np.logspace(weight,0,y.shape[0])))\n A = np.dot(W,A)\n y = np.dot(y,W)\n\n slope, intercept = _lstsq(A,y)[0]\n # TODO: is it appropriate to set negative values to 0? Why are we getting negative values?\n if slope <= 0:\n return 0\n\n t1 = tr / np.log(slope)\n if clamp:\n t1 = max(clamp[0], min(clamp[1], t1))\n return t1",
"def tanh_activation(val, theta):\n result = math.tanh(val-theta)\n return result\n # return threshold_activation(result, 0)",
"def test_tanh1(self):\r\n self.assertEqual(tanh_calculation.calculate_tanh(0), 0.0)",
"def tanh_derivative(x):\n return 1 - np.power(x, 2)",
"def tanh(x):\n return (x.exp() - (-x).exp()) / (x.exp() + (-x).exp())",
"def _hyp2f1_dlmf1583_first(a_i, b_i, a_j, b_j, y, mu):\n\n a = a_j\n c = a_j + a_i\n s = (b_j - mu) / (mu + b_i)\n z = (b_j + b_i) / (b_j - mu)\n scale = (\n -a_j * np.log(s)\n + _gammaln(a_j + y + 1)\n - _gammaln(y + 1)\n + _gammaln(a_i)\n - _gammaln(a_i + a_j)\n )\n\n # 2F1(a, -y; c; z) via backwards recurrence\n val, sign, da, _, dc, dz, d2z = _hyp2f1_recurrence(a, y, c, z)\n\n # map gradient to parameters\n da_i = dc - _digamma(a_i + a_j) + _digamma(a_i)\n da_j = da + dc - np.log(s) + _digamma(a_j + y + 1) - _digamma(a_i + a_j)\n db_i = dz / (b_j - mu) + a_j / (mu + b_i)\n db_j = dz * (1 - z) / (b_j - mu) - a_j / s / (mu + b_i)\n\n # needed to verify result\n d2b_j = (1 - z) / (b_j - mu) ** 2 * (d2z * (1 - z) - 2 * dz * (1 + a_j)) + (\n 1 + a_j\n ) * a_j / (b_j - mu) ** 2\n\n val += scale\n\n return val, sign, da_i, db_i, da_j, db_j, d2b_j",
"def der_cost_func_p1(es_x, gt_y, p1):\n s = 0\n for ex, gy in zip(es_x, gt_y):\n ey = ex * p1\n s += ((ey - gy) * ex)\n m = len(es_x)\n # gradiente\n g = s / m\n print(g)\n return g",
"def _h1_chi2_cmp_ ( h1 ,\n func ,\n integral = False ,\n select = lambda x,y,v : True ,\n chi2 = lambda v1,v2 : v1.chi2(v2) ) :\n c2 = 0\n ndf = 0\n\n _func_ = lambda x , xl , xr : func ( x )\n if integral and hasattr ( func , 'integral' ) :\n _func_ = lambda x,xl,xr : func.integral ( xl , xr ) / ( xr - xl ) \n elif integral and hasattr ( func , 'Integral' ) : \n _func_ = lambda x,xl,xr : func.Integral ( xl , xr ) / ( xr - xl ) \n elif integral :\n ## use numerical integration \n from ostap.math.intergal import integral as _integral_\n _func_ = lambda x , xl , xr : _integral_ ( func , xl , xr ) / ( xr - xl )\n\n\n ## helper function\n def _chi2_ ( c , histo , func , accept , funchi2 ) :\n\n c2 = 0.0\n ndf = 1\n\n for entry in histo.items() :\n \n x = entry [ 1 ]\n y1 = entry [ 2 ]\n \n xv = x.value()\n xe = x.error()\n xl = xv - xe\n xr = xv + xe\n \n y2 = func ( x , xl , xr ) \n if not accept ( x, y1 , y2 ) : continue\n\n c2 += funchi2 ( y1 , c * y2 )\n ndf += 1\n\n return c2 , ndf \n\n if not scale : \n c2 , ndf = _chi2_ ( 1.0 , h1 , _func_ , select , chi2 )\n c2ndf = c2/ndf \n return c2ndf, ROOT.TMath.Prob( c2 , ndf )\n \n fun = lambda c : _chi2_ ( 1.0 , h1 , _func_ , select , chi2 )[0]\n\n from ostap.math.minimize import minimize_scalar \n r = minimize_scalar ( fun )\n\n c2 , ndf = _chi2_ ( r.x , h1 , _func_ , select , chi2 )\n \n c2ndf = c2/ndf \n return c2ndf, ROOT.TMath.Prob( c2 , ndf ) , r.x",
"def interpolate(x, y, x1):\r\n\tfor item in x:\r\n\t\titem = float(item)\r\n\tfor item in y:\r\n\t\titem = float(item)\r\n\tx1 = float(x1)\r\n\t \r\n\ty1 = y[0] + (x1 - x[0]) / (x[1] - x[0]) * (y[1] - y[0])\r\n\t\r\n\treturn y1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
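For the hyp1f1 row above, a minimal sketch of evaluating hyp1f1 in UTP arithmetic, assuming the class is AlgoPy's UTPM and that SciPy is installed (both assumptions; only the classmethod body appears in this row):

    # Sketch only: the zeroth Taylor coefficient of the result should match scipy.special.hyp1f1.
    import numpy
    from algopy import UTPM
    from scipy import special

    x = UTPM(numpy.zeros((4, 1)))  # D=4 Taylor coefficients, P=1 direction, scalar value
    x.data[0, 0] = 0.3             # zeroth coefficient: the evaluation point
    x.data[1, 0] = 1.0             # first coefficient: seed so higher coefficients carry derivatives

    y = UTPM.hyp1f1(1.5, 2.0, x)   # propagate hyp1f1 through the truncated Taylor polynomial
    print(y.data[0, 0], special.hyp1f1(1.5, 2.0, 0.3))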
computes y = hyperu(a, b, x) in UTP arithmetic | def hyperu(cls, a, b, x):
retval = x.clone()
cls._hyperu(a, b, x.data, out = retval.data)
return retval | [
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def solve_U(U, b):\n m, k = b.shape\n x = np.zeros((m,k))\n x[-1,:] = b[-1,:] / U[-1,-1]\n for i in range(m-2,-1,-1):\n x[i,:] = (b[i,:] - U[i, i+1:]@x[i+1:,:]) / U[i,i]\n return x",
"def get_ua(u_1,z_b):\n\tu_a = -g*np.matrix([[0.0],[0.0],[1.0]]) + u_1*z_b/m\n\treturn u_a",
"def _tlu(x, weights, bias, threshold):\n return K.maximum(weights * x + bias, threshold)",
"def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):\n thr_fun = lambda X: (X < -threshold) * -a + (X > threshold) * a + ((X < -threshold) + (X > threshold) == 0) * X\n fun = lambda X: thr_fun(a * scipy.tanh(X * b))\n # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2\n ab = a * b\n der = lambda X: ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) + scipy.ones(X.shape) * prime_offset\n inv = lambda X: scipy.arctanh(X / a) / b\n descr = \"hyperbolic_tangent(%f, %f, %f, %f)\" % (a, b, prime_offset, threshold)\n return ActivationFunction(fun, inv, der, descr)",
"def test_hyperbolic_function(self):\n reg_args = Namespace(regularization_x_trans=-1.0,\n regularization_y_trans=-2.0,\n regularization_slope=2,\n regularization_method=regularization.Regularization.HYPERBOLIC)\n self.assertEqual(\"Hyperbolic regularization: y = 2 (0.5 (x + 1.0) + (0.25 + 0.25 (x + 1.0)^{2})^{0.5}) - 2.0\",\n regularization.hyperbolic_function_string(reg_args))\n\n x_t = lambda t: t\n y_t = lambda t: 0\n reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)\n self.assertAlmostEqual(2.236067977, reg_x_t(1))\n self.assertAlmostEqual(0, reg_y_t(1))\n\n x_t = lambda t: t * 3 / 5\n y_t = lambda t: t * 4 / 5\n reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)\n self.assertAlmostEqual(2.236067977 * 3 / 5, reg_x_t(1))\n self.assertAlmostEqual(2.236067977 * 4 / 5, reg_y_t(1))",
"def ranged_dot(lower, upper, w, b):\n lowers = torch.tensor(lower).repeat(len(w), 1)\n uppers = torch.tensor(upper).repeat(len(w), 1)\n # Element-wise product of each (x_l, x_u) with the weights\n y_lowers = w * lowers\n y_uppers = w * uppers\n\n # Since a negative weight will swap lower/upper bounds:\n # 1. Take the element-wise minimum and maximum\n # 2. Sum along the output dimension\n # 3. Add the bias\n y_lower = torch.min(y_lowers, y_uppers)\n y_lower = y_lower.sum(1) + b\n y_upper = torch.max(y_lowers, y_uppers)\n y_upper = y_upper.sum(1) + b\n\n return y_lower, y_upper",
"def backSubstitution_UpperTri(U, b):\n # Dimension Check\n if U.shape[0] != U.shape[1]:\n raise DimensionMismatchError(\"Q must be square\")\n if U.shape[0] != b.shape[0]:\n raise DimensionMismatchError(\"Rows Q ({Q.shape[0]} != Rows b ({b.shape[0]}))\")\n\n n = U.shape[0]\n x = np.zeros((n, 1))\n x[n-1, 0] = b[n-1, 0]/U[n-1, n-1]\n\n for i in range(n-2, -1, -1):\n s = 0\n for j in range(i+1, n):\n s += U[i, j]*x[j, 0]\n x[i, 0] = (b[i, 0] - s)/U[i, i]\n return x",
"def HamSN1D_Hamiltonian(t, u):\n x, y = u.T\n return 0.5*y*y + x**3/3 + 0.5*x*x",
"def build(quadratic_control_lyapunov_function, a, b):\n\n affine_dynamic_output = quadratic_control_lyapunov_function.output\n P = quadratic_control_lyapunov_function.P\n alpha = quadratic_control_lyapunov_function.alpha\n return LearnedQuadraticControlLyapunovFunction(affine_dynamic_output, P, alpha, a, b)",
"def Duffing1D_Hamiltonian(t, u, PARAMETERS = [1, 1]):\n x, p_x = u.T\n alpha, beta = PARAMETERS\n return 0.5*(p_x**2 - alpha*x**2 + 0.5*beta*x**4)",
"def scheme(u, q, f, i, j, n, i2, i3, j2, j3, x ,y, dtdx2, dtdy2, dt2, dt, b):\n\n u[i,j,n+1] = 2*u[i,j,n] - (1 - 0.5*b*dt)*u[i,j,n-1] + \\\n dtdx2*((q(x[i2],y[j]) + q(x[i],y[j]))*(u[i2,j,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i3],y[j]))*(u[i,j,n] -u[i3,j,n])) + \\\n dtdy2*((q(x[i],y[j2]) + q(x[i],y[j]))*(u[i,j2,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i],y[j3]))*(u[i,j,n] -u[i,j3,n])) + \\\n dt2*f(x[i],y[j],dt*n)\n \n u[i,j,n+1] /= 1 + 0.5*b*dt",
"def evaluate(self, u, K, y):\n u = ensure_1d(u)\n y = ensure_1d(y)\n\n yKu = y * (K @ u)\n\n f = np.logaddexp(0, -yKu).sum() + (self.lammy / 2) * u @ K @ u\n\n with np.errstate(over=\"ignore\"): # overflowing here is okay: we get 0\n g_bits = -y / (1 + np.exp(yKu))\n g = K @ g_bits + self.lammy * K @ u\n\n return f, g",
"def evaluate(x,y):\n return (1.5 - x + x*y)**2 + (2.25 - x + x*y*y)**2 + (2.625 - x + x*y*y*y)**2",
"def get_Hu():\n \n ue = np.zeros((nx+1,ny)) \n uw = np.zeros((nx+1,ny))\n un = np.zeros((nx+1,ny))\n us = np.zeros((nx+1,ny))\n vn = np.zeros((nx+1,ny))\n vs = np.zeros((nx+1,ny))\n τxxe = np.zeros((nx+1,ny))\n τxxw = np.zeros((nx+1,ny))\n τxyn = np.zeros((nx+1,ny))\n τxys = np.zeros((nx+1,ny))\n Hu = np.zeros((nx+1,ny))\n \n i = np.arange(1,nx) # u-cell centers in domain interior\n \n ue[i,:] = (u[i+1,:] + u[i,:])/2\n uw[i,:] = (u[i,:] + u[i-1,:])/2\n \n j = np.arange(0,ny-1)\n un[IJ(i,j)] = (u[IJ(i,j+1)] + u[IJ(i,j)])/2\n un[i,ny-1] = ubc_t\n j = np.arange(1,ny)\n us[IJ(i,j)] = (u[IJ(i,j)] + u[IJ(i,j-1)])/2\n us[i,0] = ubc_b\n \n j = np.arange(0,ny)\n vn[IJ(i,j)] = (v[IJ(i-1,j+1)]+v[IJ(i,j+1)])/2\n vs[IJ(i,j)] = (v[IJ(i-1,j)] +v[IJ(i,j)]) /2\n \n τxxe[i,:] = -2*ν*(u[i+1,:] - u[i,:]) /Δx\n τxxw[i,:] = -2*ν*(u[i,:] - u[i-1,:])/Δx\n \n j = np.arange(0,ny-1)\n τxyn[IJ(i,j)] = -ν*(u[IJ(i,j+1)]-u[IJ(i,j)])/Δy - ν*(v[IJ(i,j+1)]-v[IJ(i-1,j+1)])/Δx\n τxyn[i,ny-1] = -ν*(ubc_t-u[i,ny-1])/(Δy/2) - ν*(v[i,ny]-v[i-1,ny])/Δx \n \n j = np.arange(1,ny)\n τxys[IJ(i,j)] = -ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy - ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx\n τxys[i,0] = -ν*(u[i,0]-ubc_b)/(Δy/2) - ν*(v[i,0]-v[i-1,0])/Δx\n \n Hu[i,:] = -((ue[i,:]*ue[i,:] - uw[i,:]*uw[i,:])/Δx + (un[i,:]*vn[i,:] - us[i,:]*vs[i,:])/Δy) \\\n -((τxxe[i,:] - τxxw[i,:])/Δx + (τxyn[i,:] - τxys[i,:])/Δy)\n \n return Hu",
"def forward(s, x, u=None):\n # mean control\n mu = s.m(x)\n # Build u_theta(cdot | x)\n n = Normal(mu, s.std)\n # sample a u if we are simulating the system, use the argument\n # we are calculating the policy gradient\n if u is None:\n u = n.rsample()\n logp = n.log_prob(u)\n return u, logp",
"def U(i, g, X) :\n d_i = sum(g[i]) # degree of i\n\n direct_u = sum([g[i, j] * u(i, j, X) for j in range(n)])\n\n mutual_u = sum([g[i, j] * g[j, i] * u(i, j, X) for j in range(n)])\n\n indirect_u = 0\n for j in range(n) :\n for k in range(n) :\n if k == i or k == j :\n continue\n else :\n indirect_u += g[i, j] * g[j, k] * u(i, k, X)\n\n return direct_u + gamma * mutual_u + delta * indirect_u - d_i ** alpha * c",
"def affine_transformation(X_unprj, affine_x, affine_y, args, header):\n\tx_pred = np.dot(X_unprj, affine_x)\n\ty_pred = np.dot(X_unprj, affine_y)\n\treturn x_pred, y_pred",
"def theil_U(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.sqrt(MSE(y, y_hat) / MSE(y, np.zeros_like(y)))",
"def test_relu_deriv():\n\n x = np.array([[0, 1, 3],\n [-1, 0, -5],\n [1, 0, 3],\n [10, -9, -7]])\n\n y = np.array([[0, 1, 1],\n [0, 0, 0],\n [1, 0, 1],\n [1, 0, 0]])\n\n assert np.array_equal(relu(x, deriv=True), y)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
computes y = botched_clip(a_min, a_max, x) in UTP arithmetic | def botched_clip(cls, a_min, a_max, x):
retval = x.clone()
cls._botched_clip(a_min, a_max, x.data, out = retval.data)
return retval | [
"def clip(x:T, minvalue:T, maxvalue:T) -> T:\n if x < minvalue:\n return minvalue\n elif x < maxvalue:\n return x\n return maxvalue",
"def clip(x, min=0, max=1):\n return np.clip(x.obs, min, max)",
"def clip_image(image, clip_min, clip_max):\n return np.minimum(np.maximum(clip_min, image), clip_max)",
"def clamp(x, a, b):\n return min(max(x,a),b)",
"def clip(min, val, max):\n return min if val < min else max if val > max else val",
"def clip(val, minval, maxval):\n return max(min(maxval, val), minval)",
"def clamp(x, lower, upper) -> torch.Tensor:\n x = torch.min(torch.max(x, lower), upper)\n x = torch.where(lower < upper, x, (lower + upper) / 2)\n return x",
"def clip_(self, _min, _max):\n return self.__lazy_map(lambda e: max(min(e, _max), _min))",
"def clip(data, domain):\n max_val = np.max(domain)\n min_val = np.min(domain)\n data[data>max_val] = max_val\n data[data<min_val] = min_val",
"def clip(val, lower=0.0, upper=1.0):\n if isinstance(val, list):\n return [clip(v, lower, upper) for v in val]\n return max(lower, min(upper, val))",
"def clip(x, clip_level):\n mean = np.mean(x)\n std = np.std(x)\n return np.clip(x, mean - clip_level * std, mean + clip_level * std)",
"def clip(val):\n return max(min(val, 4.0), -4.0)",
"def _clip(arr, vmin, vmax):\n arr[arr < vmin] = vmin\n arr[arr > vmax] = vmax\n return arr",
"def clip(*args):\n return _seb.clip(*args)",
"def clipped_logmap(x, ax, bx, min, max):\n\n sgn = np.sign(x)\n clipped_x = np.clip(x, 0.35, None)\n y = math.log10(clipped_x / ax * 30) * bx / 80\n y_clipped = sgn * np.clip(y, 0, 1)\n\n return (y_clipped/2 +0.5) * (max - min) + min",
"def clip(value, min=None, max=None):\n if min is not None and value < min:\n value = min\n if max is not None and value > max:\n value = max\n return value",
"def clamp(x='0.0', min='0.0', max='1.0'):\n\n pass",
"def clip_scalar(val, vmin, vmax):\n return vmin if val < vmin else vmax if val > vmax else val",
"def maybe_clamp(x, x_range, ignored_if_non_positive):\n x_min, x_max = x_range\n if x_min is not None and x_max is not None and x_min > x_max:\n raise ValueError('Invalid range: %s.' % str(x_range))\n if (x_min is not None) and (not ignored_if_non_positive or x_min > 0.0):\n x = tf.math.maximum(x_min, x)\n if (x_max is not None) and (not ignored_if_non_positive or x_max > 0.0):\n x = tf.math.minimum(x_max, x)\n return x",
"def clip(self, min, max):\n new_data = np.clip(self.data, min, max)\n newasa = self.copy()\n newasa._data = new_data\n return newasa"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
computes y = dpm_hyp2f0(a1, a2, x) in UTP arithmetic | def dpm_hyp2f0(cls, a1, a2, x):
retval = x.clone()
cls._dpm_hyp2f0(a1, a2, x.data, out = retval.data)
return retval | [
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def hypothenuse(a, b):\n return sqrt(a*a + b*b)",
"def _hyp2f1_dlmf1583_first(a_i, b_i, a_j, b_j, y, mu):\n\n a = a_j\n c = a_j + a_i\n s = (b_j - mu) / (mu + b_i)\n z = (b_j + b_i) / (b_j - mu)\n scale = (\n -a_j * np.log(s)\n + _gammaln(a_j + y + 1)\n - _gammaln(y + 1)\n + _gammaln(a_i)\n - _gammaln(a_i + a_j)\n )\n\n # 2F1(a, -y; c; z) via backwards recurrence\n val, sign, da, _, dc, dz, d2z = _hyp2f1_recurrence(a, y, c, z)\n\n # map gradient to parameters\n da_i = dc - _digamma(a_i + a_j) + _digamma(a_i)\n da_j = da + dc - np.log(s) + _digamma(a_j + y + 1) - _digamma(a_i + a_j)\n db_i = dz / (b_j - mu) + a_j / (mu + b_i)\n db_j = dz * (1 - z) / (b_j - mu) - a_j / s / (mu + b_i)\n\n # needed to verify result\n d2b_j = (1 - z) / (b_j - mu) ** 2 * (d2z * (1 - z) - 2 * dz * (1 + a_j)) + (\n 1 + a_j\n ) * a_j / (b_j - mu) ** 2\n\n val += scale\n\n return val, sign, da_i, db_i, da_j, db_j, d2b_j",
"def phi(x1, x2):\n return np.array([x1, x2, x1**2.0 + x2**2.0])",
"def p1CoeffsFromP2x0y0(p2Coeffs, x0, y0):\n mP1 = p2Coeffs[0]/p2Coeffs[2]\n cosTheta = np.cos(np.arctan(mP1))\n sinTheta = np.sin(np.arctan(mP1))\n deltaP1 = -cosTheta*x0 - sinTheta*y0\n p1Coeffs = [cosTheta, sinTheta - cosTheta, -sinTheta, deltaP1]\n\n return p1Coeffs",
"def chi2(prof, x, y, dy):\n\n return np.sum(np.power((Voigt(prof, x)-y)/dy, 2))",
"def g_rtfn_y(t, y, gout, g_data):\r\n import ctypes\r\n gout[0] = y[0] - ctypes.cast(g_data,\r\n ctypes.POINTER(ctypes.c_float)).contents.value\r\n return 0",
"def _h1_chi2_cmp_ ( h1 ,\n func ,\n integral = False ,\n select = lambda x,y,v : True ,\n chi2 = lambda v1,v2 : v1.chi2(v2) ) :\n c2 = 0\n ndf = 0\n\n _func_ = lambda x , xl , xr : func ( x )\n if integral and hasattr ( func , 'integral' ) :\n _func_ = lambda x,xl,xr : func.integral ( xl , xr ) / ( xr - xl ) \n elif integral and hasattr ( func , 'Integral' ) : \n _func_ = lambda x,xl,xr : func.Integral ( xl , xr ) / ( xr - xl ) \n elif integral :\n ## use numerical integration \n from ostap.math.intergal import integral as _integral_\n _func_ = lambda x , xl , xr : _integral_ ( func , xl , xr ) / ( xr - xl )\n\n\n ## helper function\n def _chi2_ ( c , histo , func , accept , funchi2 ) :\n\n c2 = 0.0\n ndf = 1\n\n for entry in histo.items() :\n \n x = entry [ 1 ]\n y1 = entry [ 2 ]\n \n xv = x.value()\n xe = x.error()\n xl = xv - xe\n xr = xv + xe\n \n y2 = func ( x , xl , xr ) \n if not accept ( x, y1 , y2 ) : continue\n\n c2 += funchi2 ( y1 , c * y2 )\n ndf += 1\n\n return c2 , ndf \n\n if not scale : \n c2 , ndf = _chi2_ ( 1.0 , h1 , _func_ , select , chi2 )\n c2ndf = c2/ndf \n return c2ndf, ROOT.TMath.Prob( c2 , ndf )\n \n fun = lambda c : _chi2_ ( 1.0 , h1 , _func_ , select , chi2 )[0]\n\n from ostap.math.minimize import minimize_scalar \n r = minimize_scalar ( fun )\n\n c2 , ndf = _chi2_ ( r.x , h1 , _func_ , select , chi2 )\n \n c2ndf = c2/ndf \n return c2ndf, ROOT.TMath.Prob( c2 , ndf ) , r.x",
"def f2(self, x):\n return (2./3.)*(1/(np.power(x,2)-1.))",
"def _hyp2f1_recurrence(a, b, c, z):\n # TODO\n # fails with (200.0, 101.0, 401.6, 1.1)\n assert b % 1.0 == 0.0 and b >= 0\n assert np.abs(c) >= np.abs(a)\n assert 2.0 > z > 1.0 # TODO: generalize\n f0 = 1.0\n f1 = 1 - a * z / c\n s0 = 1.0\n s1 = np.sign(f1)\n g0 = np.zeros(4) # df/da df/db df/dc df/dz\n g1 = np.array([-z / c, 0.0, a * z / c**2, -a / c]) / f1\n p0 = 0.0 # d2f/dz2\n p1 = 0.0\n f0 = np.log(np.abs(f0))\n f1 = np.log(np.abs(f1))\n if b == 0:\n return f0, s0, g0[0], g0[1], g0[2], g0[3], p0\n if b == 1:\n return f1, s1, g1[0], g1[1], g1[2], g1[3], p1\n for n in range(1, int(b)):\n ak = n * (z - 1) / (c + n)\n dak = np.array([0.0, 0.0, -ak / (c + n), ak / (z - 1)])\n bk = (2 * n + c - z * (a + n)) / (c + n)\n dbk = np.array([-z / (c + n), 0.0, (1 - bk) / (c + n), -(a + n) / (c + n)])\n u = s0 * np.exp(f0 - f1)\n v = s1 * bk + u * ak\n s = np.sign(v)\n f = np.log(np.abs(v)) + f1\n g = (g1 * bk * s1 + g0 * u * ak + dbk * s1 + dak * u) / v\n p = (\n p1 * bk * s1\n + p0 * u * ak\n + 2 / (c + n) * (u * g0[3] * n - s1 * g1[3] * (a + n))\n ) / v\n f1, f0 = f, f1\n s1, s0 = s, s1\n g1, g0 = g, g1\n p1, p0 = p, p1\n if not _is_valid_2f1(g[3], p, a, -b, c, z):\n raise Invalid2F1(\"Hypergeometric series did not converge\")\n da, db, dc, dz = g\n return f, s, da, db, dc, dz, p",
"def _h3_cmp_costheta_ ( h1 ,\n h2 ,\n density = False ) :\n assert isinstance ( h1 , ROOT.TH3 ) and 3 == h1.dim () , \\\n \"cmp_cos: invalid type of h1 %s/%s\" % ( h1 , type ( h1 ) )\n \n if isinstance ( h2 , ROOT.TH1 ) :\n assert 3 == h2.dim () , \"cmp_cos: invalid type of h2 %s/%s\" % ( h2 , type ( h2 ) )\n \n if density : \n h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1\n h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2\n cmp = _h3_cmp_costheta_ ( h1_ , h2_ , density = False )\n if h1_ is not h1 : del h1_\n if h2_ is not h2 : del h2_\n return cmp\n \n f1 = lambda x , y , z : float ( h1 ( x , y , z ) ) \n f2 = lambda x , y , z : float ( h2 ( x , y , z ) )\n \n xlims = h1.xminmax()\n ylims = h1.yminmax() \n zlims = h1.zminmax()\n params = xlims [ 0 ] , xlims [ 1 ] , ylims [ 0 ] , ylims [ 1 ] , zlims [ 0 ] , zlims [ 1 ] \n \n from ostap.math.integral import integral3 as _integral3_\n r1 = _integral3_ ( lambda x , y , z : f1 ( x , y , z ) ** 2 , *params )\n r2 = _integral3_ ( lambda x , y , z : f2 ( x , y , z ) ** 2 , *params )\n r12 = _integral3_ ( lambda x , y , z : f1 ( x , y , z ) * f2 ( x , y , z ) , *params ) \n \n return r12 / ( r1 * r2 ) ** 0.5",
"def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):\n thr_fun = lambda X: (X < -threshold) * -a + (X > threshold) * a + ((X < -threshold) + (X > threshold) == 0) * X\n fun = lambda X: thr_fun(a * scipy.tanh(X * b))\n # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2\n ab = a * b\n der = lambda X: ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) + scipy.ones(X.shape) * prime_offset\n inv = lambda X: scipy.arctanh(X / a) / b\n descr = \"hyperbolic_tangent(%f, %f, %f, %f)\" % (a, b, prime_offset, threshold)\n return ActivationFunction(fun, inv, der, descr)",
"def u2(self, phi, lam, q):\n return self.append(U2Gate(phi, lam), [q], [])",
"def theil_U(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.sqrt(MSE(y, y_hat) / MSE(y, np.zeros_like(y)))",
"def bohachevsky(x1, x2):\n term1 = x1 ** 2\n term2 = 2 * x2 ** 2\n term3 = -0.3 * math.cos(3 * math.pi * x1)\n term4 = -0.4 * math.cos(4 * math.pi * x2)\n\n return term1 + term2 + term3 + term4 + 0.7",
"def y_HC2GC(s, l, b):\n return s*sin(l)*cos(b)",
"def y(self, t, n):\n s = self.s\n if n == 0:\n # eq. A.3\n y = np.tanh(2*(2*t - 1) / ((4*t*(1 - t))**s))\n elif n == 1:\n # eq. A.5\n y = self.a(t, 2)*(1 - self.y(t, 0)**2)\n else:\n # eq. A.7\n y = sum(sp.special.binom(n - 1, k)*self.a(t, k + 2)*self.z(t, n - 1 - k) for k in range(0, n))\n return y",
"def calculate_hypotenuse(base, height):\n pass",
"def q2u(q1, q2):\n u1 = 2*np.sqrt(q1)*q2\n u2 = np.sqrt(q1)*(1-2*q2)\n return u1, u2",
"def linear_interpolation(y1, y2, weight):\n \n # Return linearly interpolated data value\n return y1*(1.0-weight)+y2*weight"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
computes y = hyp2f0(a1, a2, x) in UTP arithmetic | def hyp2f0(cls, a1, a2, x):
retval = x.clone()
cls._hyp2f0(a1, a2, x.data, out = retval.data)
return retval | [
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def hypothenuse(a, b):\n return sqrt(a*a + b*b)",
"def _hyp2f1_dlmf1583_first(a_i, b_i, a_j, b_j, y, mu):\n\n a = a_j\n c = a_j + a_i\n s = (b_j - mu) / (mu + b_i)\n z = (b_j + b_i) / (b_j - mu)\n scale = (\n -a_j * np.log(s)\n + _gammaln(a_j + y + 1)\n - _gammaln(y + 1)\n + _gammaln(a_i)\n - _gammaln(a_i + a_j)\n )\n\n # 2F1(a, -y; c; z) via backwards recurrence\n val, sign, da, _, dc, dz, d2z = _hyp2f1_recurrence(a, y, c, z)\n\n # map gradient to parameters\n da_i = dc - _digamma(a_i + a_j) + _digamma(a_i)\n da_j = da + dc - np.log(s) + _digamma(a_j + y + 1) - _digamma(a_i + a_j)\n db_i = dz / (b_j - mu) + a_j / (mu + b_i)\n db_j = dz * (1 - z) / (b_j - mu) - a_j / s / (mu + b_i)\n\n # needed to verify result\n d2b_j = (1 - z) / (b_j - mu) ** 2 * (d2z * (1 - z) - 2 * dz * (1 + a_j)) + (\n 1 + a_j\n ) * a_j / (b_j - mu) ** 2\n\n val += scale\n\n return val, sign, da_i, db_i, da_j, db_j, d2b_j",
"def phi(x1, x2):\n return np.array([x1, x2, x1**2.0 + x2**2.0])",
"def p1CoeffsFromP2x0y0(p2Coeffs, x0, y0):\n mP1 = p2Coeffs[0]/p2Coeffs[2]\n cosTheta = np.cos(np.arctan(mP1))\n sinTheta = np.sin(np.arctan(mP1))\n deltaP1 = -cosTheta*x0 - sinTheta*y0\n p1Coeffs = [cosTheta, sinTheta - cosTheta, -sinTheta, deltaP1]\n\n return p1Coeffs",
"def chi2(prof, x, y, dy):\n\n return np.sum(np.power((Voigt(prof, x)-y)/dy, 2))",
"def g_rtfn_y(t, y, gout, g_data):\r\n import ctypes\r\n gout[0] = y[0] - ctypes.cast(g_data,\r\n ctypes.POINTER(ctypes.c_float)).contents.value\r\n return 0",
"def _h1_chi2_cmp_ ( h1 ,\n func ,\n integral = False ,\n select = lambda x,y,v : True ,\n chi2 = lambda v1,v2 : v1.chi2(v2) ) :\n c2 = 0\n ndf = 0\n\n _func_ = lambda x , xl , xr : func ( x )\n if integral and hasattr ( func , 'integral' ) :\n _func_ = lambda x,xl,xr : func.integral ( xl , xr ) / ( xr - xl ) \n elif integral and hasattr ( func , 'Integral' ) : \n _func_ = lambda x,xl,xr : func.Integral ( xl , xr ) / ( xr - xl ) \n elif integral :\n ## use numerical integration \n from ostap.math.intergal import integral as _integral_\n _func_ = lambda x , xl , xr : _integral_ ( func , xl , xr ) / ( xr - xl )\n\n\n ## helper function\n def _chi2_ ( c , histo , func , accept , funchi2 ) :\n\n c2 = 0.0\n ndf = 1\n\n for entry in histo.items() :\n \n x = entry [ 1 ]\n y1 = entry [ 2 ]\n \n xv = x.value()\n xe = x.error()\n xl = xv - xe\n xr = xv + xe\n \n y2 = func ( x , xl , xr ) \n if not accept ( x, y1 , y2 ) : continue\n\n c2 += funchi2 ( y1 , c * y2 )\n ndf += 1\n\n return c2 , ndf \n\n if not scale : \n c2 , ndf = _chi2_ ( 1.0 , h1 , _func_ , select , chi2 )\n c2ndf = c2/ndf \n return c2ndf, ROOT.TMath.Prob( c2 , ndf )\n \n fun = lambda c : _chi2_ ( 1.0 , h1 , _func_ , select , chi2 )[0]\n\n from ostap.math.minimize import minimize_scalar \n r = minimize_scalar ( fun )\n\n c2 , ndf = _chi2_ ( r.x , h1 , _func_ , select , chi2 )\n \n c2ndf = c2/ndf \n return c2ndf, ROOT.TMath.Prob( c2 , ndf ) , r.x",
"def f2(self, x):\n return (2./3.)*(1/(np.power(x,2)-1.))",
"def _hyp2f1_recurrence(a, b, c, z):\n # TODO\n # fails with (200.0, 101.0, 401.6, 1.1)\n assert b % 1.0 == 0.0 and b >= 0\n assert np.abs(c) >= np.abs(a)\n assert 2.0 > z > 1.0 # TODO: generalize\n f0 = 1.0\n f1 = 1 - a * z / c\n s0 = 1.0\n s1 = np.sign(f1)\n g0 = np.zeros(4) # df/da df/db df/dc df/dz\n g1 = np.array([-z / c, 0.0, a * z / c**2, -a / c]) / f1\n p0 = 0.0 # d2f/dz2\n p1 = 0.0\n f0 = np.log(np.abs(f0))\n f1 = np.log(np.abs(f1))\n if b == 0:\n return f0, s0, g0[0], g0[1], g0[2], g0[3], p0\n if b == 1:\n return f1, s1, g1[0], g1[1], g1[2], g1[3], p1\n for n in range(1, int(b)):\n ak = n * (z - 1) / (c + n)\n dak = np.array([0.0, 0.0, -ak / (c + n), ak / (z - 1)])\n bk = (2 * n + c - z * (a + n)) / (c + n)\n dbk = np.array([-z / (c + n), 0.0, (1 - bk) / (c + n), -(a + n) / (c + n)])\n u = s0 * np.exp(f0 - f1)\n v = s1 * bk + u * ak\n s = np.sign(v)\n f = np.log(np.abs(v)) + f1\n g = (g1 * bk * s1 + g0 * u * ak + dbk * s1 + dak * u) / v\n p = (\n p1 * bk * s1\n + p0 * u * ak\n + 2 / (c + n) * (u * g0[3] * n - s1 * g1[3] * (a + n))\n ) / v\n f1, f0 = f, f1\n s1, s0 = s, s1\n g1, g0 = g, g1\n p1, p0 = p, p1\n if not _is_valid_2f1(g[3], p, a, -b, c, z):\n raise Invalid2F1(\"Hypergeometric series did not converge\")\n da, db, dc, dz = g\n return f, s, da, db, dc, dz, p",
"def _h3_cmp_costheta_ ( h1 ,\n h2 ,\n density = False ) :\n assert isinstance ( h1 , ROOT.TH3 ) and 3 == h1.dim () , \\\n \"cmp_cos: invalid type of h1 %s/%s\" % ( h1 , type ( h1 ) )\n \n if isinstance ( h2 , ROOT.TH1 ) :\n assert 3 == h2.dim () , \"cmp_cos: invalid type of h2 %s/%s\" % ( h2 , type ( h2 ) )\n \n if density : \n h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1\n h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2\n cmp = _h3_cmp_costheta_ ( h1_ , h2_ , density = False )\n if h1_ is not h1 : del h1_\n if h2_ is not h2 : del h2_\n return cmp\n \n f1 = lambda x , y , z : float ( h1 ( x , y , z ) ) \n f2 = lambda x , y , z : float ( h2 ( x , y , z ) )\n \n xlims = h1.xminmax()\n ylims = h1.yminmax() \n zlims = h1.zminmax()\n params = xlims [ 0 ] , xlims [ 1 ] , ylims [ 0 ] , ylims [ 1 ] , zlims [ 0 ] , zlims [ 1 ] \n \n from ostap.math.integral import integral3 as _integral3_\n r1 = _integral3_ ( lambda x , y , z : f1 ( x , y , z ) ** 2 , *params )\n r2 = _integral3_ ( lambda x , y , z : f2 ( x , y , z ) ** 2 , *params )\n r12 = _integral3_ ( lambda x , y , z : f1 ( x , y , z ) * f2 ( x , y , z ) , *params ) \n \n return r12 / ( r1 * r2 ) ** 0.5",
"def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):\n thr_fun = lambda X: (X < -threshold) * -a + (X > threshold) * a + ((X < -threshold) + (X > threshold) == 0) * X\n fun = lambda X: thr_fun(a * scipy.tanh(X * b))\n # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2\n ab = a * b\n der = lambda X: ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) + scipy.ones(X.shape) * prime_offset\n inv = lambda X: scipy.arctanh(X / a) / b\n descr = \"hyperbolic_tangent(%f, %f, %f, %f)\" % (a, b, prime_offset, threshold)\n return ActivationFunction(fun, inv, der, descr)",
"def u2(self, phi, lam, q):\n return self.append(U2Gate(phi, lam), [q], [])",
"def theil_U(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.sqrt(MSE(y, y_hat) / MSE(y, np.zeros_like(y)))",
"def bohachevsky(x1, x2):\n term1 = x1 ** 2\n term2 = 2 * x2 ** 2\n term3 = -0.3 * math.cos(3 * math.pi * x1)\n term4 = -0.4 * math.cos(4 * math.pi * x2)\n\n return term1 + term2 + term3 + term4 + 0.7",
"def y_HC2GC(s, l, b):\n return s*sin(l)*cos(b)",
"def y(self, t, n):\n s = self.s\n if n == 0:\n # eq. A.3\n y = np.tanh(2*(2*t - 1) / ((4*t*(1 - t))**s))\n elif n == 1:\n # eq. A.5\n y = self.a(t, 2)*(1 - self.y(t, 0)**2)\n else:\n # eq. A.7\n y = sum(sp.special.binom(n - 1, k)*self.a(t, k + 2)*self.z(t, n - 1 - k) for k in range(0, n))\n return y",
"def calculate_hypotenuse(base, height):\n pass",
"def q2u(q1, q2):\n u1 = 2*np.sqrt(q1)*q2\n u2 = np.sqrt(q1)*(1-2*q2)\n return u1, u2",
"def linear_interpolation(y1, y2, weight):\n \n # Return linearly interpolated data value\n return y1*(1.0-weight)+y2*weight"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
computes y = hyp0f1(b, x) in UTP arithmetic | def hyp0f1(cls, b, x):
retval = x.clone()
cls._hyp0f1(b, x.data, out = retval.data)
return retval | [
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def _h_function(self,h):\n return self.contribution * np.exp(-1.0 * h / self.a)",
"def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):\n thr_fun = lambda X: (X < -threshold) * -a + (X > threshold) * a + ((X < -threshold) + (X > threshold) == 0) * X\n fun = lambda X: thr_fun(a * scipy.tanh(X * b))\n # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2\n ab = a * b\n der = lambda X: ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) + scipy.ones(X.shape) * prime_offset\n inv = lambda X: scipy.arctanh(X / a) / b\n descr = \"hyperbolic_tangent(%f, %f, %f, %f)\" % (a, b, prime_offset, threshold)\n return ActivationFunction(fun, inv, der, descr)",
"def bprop_scalar_tanh(x, out, dout):\n return (dout - dout * out * out,)",
"def Duffing1D_Hamiltonian(t, u, PARAMETERS = [1, 1]):\n x, p_x = u.T\n alpha, beta = PARAMETERS\n return 0.5*(p_x**2 - alpha*x**2 + 0.5*beta*x**4)",
"def dtanh(tanh_x):\n return 1 - tanh_x**2",
"def HamSN1D_Hamiltonian(t, u):\n x, y = u.T\n return 0.5*y*y + x**3/3 + 0.5*x*x",
"def theil_U(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.sqrt(MSE(y, y_hat) / MSE(y, np.zeros_like(y)))",
"def f(self,t,y):\n return -self.lambd*y + 2*scipy.ones_like(y)*scipy.exp(-t)*scipy.cos(2*t)",
"def HamSaddle1D_Hamiltonian(t, u, PARAMETERS = [1]):\n x, y = u.T\n # Hamiltonian Model Parameter\n lamda, = PARAMETERS\n return 0.5*lamda*(y*y - x*x)",
"def F(self,t,z,p):\n return 0.*z",
"def tanh_activation(val, theta):\n result = math.tanh(val-theta)\n return result\n # return threshold_activation(result, 0)",
"def hyp1f1(a, b, z):\n #global mp\n uselib = lib is not None and not use_mpmath\n #if not uselib and mp is None:\n # mp = __import__(\"mpmath\")\n\n p = PrmsAndInfo(c_int(max_iter), c_double(tol), c_int(0), c_double(0), c_int(0))\n if (np.ndim(a) + np.ndim(b) + np.ndim(z) > 1):\n l = [len(x) for x in (a, b, z) if hasattr(x, \"__len__\")]\n if l[1:] != l[:-1]:\n raise TypeError(\"if more than one parameter is a numpy array, they have to have the same length\")\n a, b, z = [np.ones(l[0])*x if not hasattr(x, \"__len__\") else x for x in (a, b, z)]\n if uselib:\n out = np.zeros(l[0], dtype=np.complex128)\n lib.hyp1f1_all_arr(a.astype(np.complex128), b.astype(np.complex128), z.astype(np.complex128), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(aa, bb, zz) for aa, bb, zz in zip(a, b, z)], dtype=np.complex128)\n return out\n if (np.ndim(a) == 1):\n if uselib:\n out = np.zeros(len(a), dtype=np.complex128)\n lib.hyp1f1_a_arr(a.astype(np.complex128), cmpl(b), cmpl(z), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(aa, b, z) for aa in a], dtype=np.complex128)\n return out\n elif (np.ndim(b) == 1):\n if uselib:\n out = np.zeros(len(b), dtype=np.complex128)\n lib.hyp1f1_b_arr(cmpl(a), b.astype(np.complex128), cmpl(z), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(a, bb, z) for bb in b], dtype=np.complex128)\n return out\n elif (np.ndim(z) == 1):\n if uselib:\n out = np.zeros(len(z), dtype=np.complex128)\n lib.hyp1f1_z_arr(cmpl(a), cmpl(b), z.astype(np.complex128), out, len(out), byref(p))\n if not nofallback and p.prec_warning or not uselib:\n out = np.array([mp.hyp1f1(a, b, zz) for zz in z], dtype=np.complex128)\n return out\n else: \n if uselib:\n c = lib.hyp1f1(cmpl(a), cmpl(b), cmpl(z), byref(p))\n out = c.re + 1j* c.im\n if not nofallback and p.prec_warning or not uselib:\n out = np.complex128(mp.hyp1f1(a, b, z))\n return out",
"def test_hyperbolic_function(self):\n reg_args = Namespace(regularization_x_trans=-1.0,\n regularization_y_trans=-2.0,\n regularization_slope=2,\n regularization_method=regularization.Regularization.HYPERBOLIC)\n self.assertEqual(\"Hyperbolic regularization: y = 2 (0.5 (x + 1.0) + (0.25 + 0.25 (x + 1.0)^{2})^{0.5}) - 2.0\",\n regularization.hyperbolic_function_string(reg_args))\n\n x_t = lambda t: t\n y_t = lambda t: 0\n reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)\n self.assertAlmostEqual(2.236067977, reg_x_t(1))\n self.assertAlmostEqual(0, reg_y_t(1))\n\n x_t = lambda t: t * 3 / 5\n y_t = lambda t: t * 4 / 5\n reg_x_t, reg_y_t = regularization.regularize(x_t, y_t, reg_args)\n self.assertAlmostEqual(2.236067977 * 3 / 5, reg_x_t(1))\n self.assertAlmostEqual(2.236067977 * 4 / 5, reg_y_t(1))",
"def eval_hypothesis_function(w, x):\n # print(\"w shape:\",w.T.shape)\n # print(\"x shape:\",x.T.shape)\n z= np.dot(w.T, x.T)\n #print(z.shape)\n return 1. / (1 + np.exp(-z))",
"def hypothesis(self, test):\n h = np.zeros(test.shape[0])\n for i in range(len(self.confidence)):\n h += self.confidence[i] * self.predictors[i].predict(test)\n return h",
"def tanh_derivative(x):\n return 1 - np.power(x, 2)",
"def tanh_derivative(t):\n return 1 - t * t",
"def sigmoid(self, h):\n return 1.0 / (1.0 + exp(-self.beta * h))",
"def g_rtfn_y(t, y, gout, g_data):\r\n import ctypes\r\n gout[0] = y[0] - ctypes.cast(g_data,\r\n ctypes.POINTER(ctypes.c_float)).contents.value\r\n return 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
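A minimal usage sketch of the calling pattern for this elementwise UTP driver; the polygamma and reciprocal rows below follow the same pattern. The sketch is illustrative only: it assumes an AlgoPy-style UTPM class (`from algopy import UTPM`), uses the init_jacobian driver documented further down to seed the coefficients, and needs scipy.special behind the `_hyp0f1` kernel.

import numpy
from algopy import UTPM        # assumption: an AlgoPy-style UTPM class providing these classmethods

x0 = numpy.array([0.3, 1.2])
ux = UTPM.init_jacobian(x0)    # identity seed in the first-order Taylor coefficients
uy = UTPM.hyp0f1(2.0, ux)      # y = 0F1(; 2.0; x), propagated elementwise in UTP arithmetic
vals = uy.data[0, 0]           # zeroth-order coefficients: the function values at x0
derivs = uy.data[1]            # first-order coefficients: diagonal array of d 0F1(; 2.0; x)/dx at x0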
computes y = polygamma(n, x) in UTP arithmetic | def polygamma(cls, n, x):
    retval = x.clone()
    cls._polygamma(n, x.data, out = retval.data)
    return retval | [
"def polyfunc(x, *p):\n y = 0\n for n, P in enumerate(p):\n y += P * x ** n\n return y",
"def polynomial():\n np.random.seed(42)\n input_data = 10 * np.random.rand(30)\n y = (\n 5 * input_data +\n 2.0 * input_data ** 2 +\n 7 +\n 10 * np.random.randn(len(input_data))\n )\n formula = (\n gammy.Scalar(prior=(0, 1e-6)) * x +\n gammy.Scalar(prior=(0, 1e-6)) * x ** 2 +\n gammy.Scalar(prior=(0, 1e-6))\n )\n return (input_data, y, formula)",
"def get_bprop_polygamma(self):\n polygamma = Polygamma()\n\n def bprop(a, x, out, dout):\n one = Tensor(1)\n a = a + one\n if x.dtype in (mstype.float16,):\n x = F.cast(x, mstype.float64)\n dx = dout * polygamma(a, x)\n dx = F.cast(dx, mstype.float16)\n else:\n dx = dout * polygamma(a, x)\n return zeros_like(a), dx\n\n return bprop",
"def gamma(x):\n return 1.0",
"def polyFeatures(X, p):\n X_poly = np.zeros((X.size, p))\n for i in range(p):\n X_poly[:, [i]] = X**(i+1)\n return X_poly",
"def polyEval(p, x):\n\tk = len(p)-1 # last valid index\n\tif(k < 0):\n\t\treturn 0\n\ty = p[k]\n\twhile(k > 0):\n\t\tk -= 1\n\t\ty = y*x + p[k]\n\treturn y",
"def LagrangePolynomial( k ): # Inputs arrays\r\n assert len(x) == len(f_x) , \" x and f not same size \"\r\n sum_ = 0\r\n for i in range( n ): \r\n product = 1 # Reset the product for each i\r\n for j in range( len(x) ):\r\n if i!=j:\r\n product = product * ( k - x[j] ) / ( x[i] - x[j] ) # Product over j's for a given i\r\n sum_ = sum_ + product * f_x[i] # Sum over i's after product over j done\r\n return sum_",
"def A_term(i,r,u,l1,l2,PAx,PBx,CPx,gamma):\n return pow(-1,i)*binomial_prefactor(i,l1,l2,PAx,PBx)*\\\n pow(-1,u)*factorial(i)*pow(CPx,i-2*r-2*u)*\\\n pow(0.25/gamma,r+u)/factorial(r)/factorial(u)/factorial(i-2*r-2*u)",
"def evaluate_lambda_poly(x, ps):\r\n if not isinstance(x, np.ndarray):\r\n x = np.array(x)\r\n result = np.ones(len(x))\r\n for p in ps:\r\n result = result * (x - p)\r\n return result.astype(np.float)",
"def lgamma(x):\n cof = [ 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 ]\n y = x\n tmp = x + 5.5\n tmp -= ((x + 0.5) * math.log(tmp))\n ser = 1.000000000190015\n for j in range(len(cof)):\n y += 1\n ser += (cof[j] / y)\n return (-tmp + math.log(2.5066282746310005 * ser / x))",
"def polyfunc_odd(x, *p):\n y = p[0]\n for n, P in enumerate(p[1:]):\n y += P * x ** (2*n+1)\n return y",
"def haei_with_gamma(x, gamma):\n\n return 1 - (gamma/np.sqrt(gamma**2 + x))",
"def polynom(coeffs, x): \n if hasattr(x, \"__len__\"):\n return np.array([polynom(coeffs, x_i) for x_i in x])\n return sum([coeffs[i]*x**i for i in range(len(coeffs))])",
"def taylorPoly(f, a, n=1, df=None):\n if df is None:\n df = lambda a, n: numDiff(f, a, n)\n fprime = zeros(((n+1),(n+1)))\n for i in range(n+1):\n value = df(a, i) / factorial(i)\n for j in range(i+1):\n x, y = i-j, j\n fprime[x,y] = value\n pasc = pascal(n)\n alpha = (-a)**numpy.arange(n+1)\n terms = alpha[newaxis,:] * pasc * fprime\n coeff = numpy.sum(terms, axis=1)\n return Polynomial(coeff)",
"def probability_of_n_purchases_up_to_time(self, t, n):\n\n r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')\n\n first_term = special.beta(a, b + n) / special.beta(a, b) * special.gamma(r + n) / special.gamma(r) / special.gamma(n + 1) * (alpha / (alpha + t)) ** r * (t / (alpha + t)) ** n\n if n > 0:\n finite_sum = np.sum([special.gamma(r + j) / special.gamma(r) / special.gamma(j + 1) * (t / (alpha + t)) ** j for j in range(0, n)])\n second_term = special.beta(a + 1, b + n - 1) / special.beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)\n else:\n second_term = 0\n return first_term + second_term",
"def phi(n):\n ps = list(unique(prime_factors(n)))\n return int(n * reduce(operator.mul, (1 - Fraction(1, p) for p in ps)))",
"def primitivePoly(p, n):\n iPoly = irreduciblePoly(p, n + 1)\n #print(iPoly)\n candidates = [gfp(p, v) for v in iPoly if len(v) == n + 1]\n #print(candidates)\n fieldPoly = [0 for i in range(p**n)]\n fieldPoly[0], fieldPoly[-1] = 1, -1\n fieldPoly = gfp(p, fieldPoly)\n #print(fieldPoly)\n \n pPoly = []\n for i in range(len(candidates)):\n q, r = fieldPoly.deconv(candidates[i])\n if r == gfp(p):\n pPoly.append(candidates[i])\n \n return pPoly",
"def runPoly():\n X,y=preprocess()\n Polynomial(X,y)",
"def polynomial(x: int, coefficients: typing.Sequence[int], n: int) -> int:\n y = 0\n for i, a in enumerate(coefficients):\n y = (y + (a * (x ** i) % n) % n) % n\n return y",
"def polynomial_func(x_data,pars):\n f = 0\n for (i,a) in enumerate(pars):\n f += a()*x_data**(i)\n\n return f"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
computes y = reciprocal(x) in UTP arithmetic | def reciprocal(cls, x):
    retval = x.clone()
    cls._reciprocal(x.data, out = retval.data)
    return retval | [
"def reciprocal(self):\n\n value = -1 / (self.val * self.val)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiff(self.var, 1 / self.val, der)",
"def GetReciprocal(self) -> \"itkVersorD\":\n return _itkVersorPython.itkVersorD_GetReciprocal(self)",
"def reciprocal(self):\n return Rational(self.denominator, self.numerator)",
"def residual_discrete_eq(u):\n R = DtDt(u, dt) + (w**2)*u(t) - f\n return sym.simplify(R)",
"def residual_discrete_eq(u):\n R = DtDt(u,dt)+w**2*u(t)-f \n return sym.simplify(R)",
"def residual_discrete_eq_step1(u):\n\n u1 = (dt**2*f.subs(t,0))/2 + I - (I*w**2*dt**2)/2 + V*dt\n\n R = u(t).subs(t,dt) - u1\n return sym.simplify(R)",
"def inverse_rational(x):\r\n return rational(denom(x), numer(x))",
"def relu(data):\n return data * (data > 0)",
"def vm_impl_real_div(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n out = x / y\n out = np.array(out, x.dtype)\n return Tensor(out)\n\n return vm_impl",
"def _r2(x, y):\n\txbar = np.mean(x)\n\tss_tot = np.sum(np.power(x-xbar,2))\n\tss_res = np.sum(np.power(x-y,2))\n\treturn 1-(ss_res/ss_tot)",
"def residual_discrete_eq_step1(u):\n R = u(t + dt) - I + 0.5*dt**2*w**2*I - dt*V - 0.5*dt**2*f.subs(t,0)\n R = R.subs(t, 0) # t=0 in the rhs of the first step eq.\n return sym.simplify(R)",
"def russellrao(u, v):\n u = _validate_vector(u)\n v = _validate_vector(v)\n if u.dtype == bool:\n ntt = (u & v).sum()\n else:\n ntt = (u * v).sum()\n return float(len(u) - ntt) / float(len(u))",
"def reciprocal_operations(self):\n return self._reciprocal_operations",
"def g_rtfn_y(t, y, gout, g_data):\r\n import ctypes\r\n gout[0] = y[0] - ctypes.cast(g_data,\r\n ctypes.POINTER(ctypes.c_float)).contents.value\r\n return 0",
"def getrxryrz(u):\n c2s1 = -u[1,2]\n c2c1 = u[2,2]\n r1 = -np.arctan2(c2s1,c2c1)\n c2c3 = u[0,0]\n c2s3 = -u[0,1]\n r3 = -np.arctan2( c2s3, c2c3 )\n s2 = u[0,2]\n if abs(np.sin(r3)) > 0.5:\n c2 = c2s3 / np.sin(r3)\n else:\n c2 = c2c3 / np.cos(r3)\n r2 = -np.arctan2( s2, c2 )\n if 1:\n utest = np.dot(np.dot(rotmatx(r1),rotmaty(r2)),rotmatz(r3))\n assert abs(utest-u).ravel().sum() < 1e-10\n return r1,r2,r3",
"def normalize(y, x=None):\n #return y * np.sqrt( (np.abs(x)**2.0).mean() / (np.abs(y)**2.0).mean() )\n if x is not None:\n x = ms(x)\n else:\n x = 1.0\n return y * np.sqrt(x / ms(y))\n #return y * np.sqrt( 1.0 / (np.abs(y)**2.0).mean() )\n\n ## Broken? Caused correlation in auralizations....weird!",
"def __reward_Inverse(self, x):\n return min(1.0, 1.0/(x + 0.5))",
"def xnor(x, y):\r\n return (not xor(x, y))",
"def normalize(x, m0, m1, r0, r1):\n return (r1 - r0) * ((x - m0) / (m1 - m0)) + r0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
inverse operation of FtoJT: if x.data.shape = (D, 1, P) + shp, then y = x.JTtoF() has y.data.shape = (D+1, P) + shp | def JTtoF(self):
    D = self.data.shape[0]
    P = self.data.shape[2]
    shp = self.data.shape[3:]
    tmp = numpy.zeros((D+1,P) + shp)
    tmp[0:D,...] = self.data.reshape((D,P) + shp)
    return UTPM(tmp) | [
"def jonswap(f,Hm0,Tp,\n g = 9.81,\n gamma = 3.3, \n method = 'Yamaguchi', \n normalize = True,\n sa = 0.07,\n sb = 0.09):\n \n # method = 'Yamaguchi'; # 'Goda' \n\n def sigma(f,fpeak,sa,sb):\n s = np.ones(f.size)*sa\n s[f > fpeak] = sb\n return s\n \n # Pierson-Moskowitz\n if method=='Yamaguchi':\n alpha = 1/(0.06533*gamma**0.8015 + 0.13467)/16; # Yamaguchi (1984), used in SWAN\n elif method=='Goda':\n alpha = 1/(0.23+0.03*gamma-0.185*(1.9+gamma)**-1)/16; # Goda\n\n pm = alpha*Hm0**2*Tp**-4*f**-5*np.exp(-1.25*(Tp*f)**-4);\n \n # apply JONSWAP shape\n E = pm*gamma**np.exp(-0.5*(Tp*f-1)**2./sigma(f,1/Tp,sa,sb)**2);\n #E(np.isnan(E))=0\n\n if normalize:\n corr = Hm0**2/(16*np.trapz(E,f))\n E = E*corr\n \n return E",
"def inverse_transform(r,t):\n r = r.transpose()\n t = - r*t\n return r,t",
"def _inverse_stress_tensor(self, f, j, p=None, formulation=None):\n\n mu = self._parameters['mu']\n finv = dlf.inv(f)\n c = f.T*f\n i1 = dlf.tr(c)\n i2 = dlf.Constant(0.5)*(i1**2 - dlf.tr(c*c))\n T = self._basic_stress_tensor(dlf.inv(c), mu)\n dim = ufl.domain.find_geometric_dimension(f)\n I = dlf.Identity(dim)\n\n if self._incompressible:\n\n T *= j**(-5.0/dim)\n b_vol = (-1.0/dim)*mu*(-1.0/dim)*i2\n if p is None:\n kappa = self._parameters['kappa']\n b_vol += self._volumetric_strain_energy_diff(1.0/j, kappa,\n formulation)\n else:\n b_vol -= p\n T += b_vol*I\n else:\n la = self._parameters['la']\n T = self._basic_stress_tensor(dlf.inv(c), mu)\n T += self._compressible_strain_energy_diff(1.0/j, la, mu)*I\n\n return T",
"def trans(trjs, J=None):\n T = []\n O = []\n X = []\n P = []\n if J is not None:\n J = np.array(J)\n\n for k in range(len(trjs)):\n if ( J is None or np.any(trjs[k].p.j == J) ) and len(trjs[k].t) >= 3:\n t = trjs[k].t.take([0,-2,-1])\n x = trjs[k].x.take([0,-2,-1],axis=0)\n p = trjs[k].p\n o = trjs[k].hds.O(t,x,p)\n\n T += [t]; X += [x]; O += [o]; P += [p]\n \n return T,X,O,P",
"def inverse_transform(self, matrix):\n #return np.fft.ifft(matrix) #just wanted to see what is to be expected\n sx = matrix.shape[0]\n sy = matrix.shape[1]\n N = max(matrix.shape[0], matrix.shape[1])\n newimage = np.zeros((sx,sy),dtype=np.complex)\n for u in range(sx):\n for v in range(sy):\n t = 0\n\n for i in range(sx):\n for j in range(sy):\n t = t + ((matrix[i, j] * (math.cos(((math.pi * 2) / N) * ((u * i) + (v * j))) - (\n ((1j) * math.sin(((math.pi * 2) / N) * ((u * i) + (v * j))))))))\n\n #t = t + (matrix[i,j]*math.exp((1j.imag)*((2*math.pi)/N)*((u*i) +(v*j))))\n\n #t = t + (matrix[i, j] * (math.cos(((math.pi * 2) / N) * ((u * i) + (v * j))) + (\n #(((1j).imag) * math.sin(((math.pi * 2) / N) * ((u * i) + (v * j)))))))\n\n newimage[u, v] = t #round(t)\n\n if (False):\n for u in range(sx):\n for v in range(sy):\n newimage[u,v] = math.floor(math.log(abs(newimage[u,v])))\n\n return newimage",
"def scheme(u, q, f, i, j, n, i2, i3, j2, j3, x ,y, dtdx2, dtdy2, dt2, dt, b):\n\n u[i,j,n+1] = 2*u[i,j,n] - (1 - 0.5*b*dt)*u[i,j,n-1] + \\\n dtdx2*((q(x[i2],y[j]) + q(x[i],y[j]))*(u[i2,j,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i3],y[j]))*(u[i,j,n] -u[i3,j,n])) + \\\n dtdy2*((q(x[i],y[j2]) + q(x[i],y[j]))*(u[i,j2,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i],y[j3]))*(u[i,j,n] -u[i,j3,n])) + \\\n dt2*f(x[i],y[j],dt*n)\n \n u[i,j,n+1] /= 1 + 0.5*b*dt",
"def compute_jz(tmap, pmap, mask=False, tr_cutoff=100):\n #Conversion: hmi pixel = 0.5 arcsec = 0.03 deg. 1 arcsec at 1 AU = 725.27 km\n degree_to_km = ((0.5*u.arcsec)/(0.03*u.deg)) * ((725.27*u.km)/(1*u.arcsec))\n #Quantities needed for computing Jz\n dx = (tmap.scale[0])*(1*u.pix) * degree_to_km #pizel width in km\n dy = (tmap.scale[1])*(1*u.pix) * degree_to_km #pixel height in km\n dby = (shift(-tmap.data, [0,-1], cval=np.NaN)-shift(-tmap.data, [0,1], cval=np.NaN)) * u.gauss\n dbx = (shift(pmap.data, [-1,0], cval=np.NaN)-shift(pmap.data, [1,0], cval=np.NaN)) * u.gauss\n npix = 2 #relative number of pixels shifted when getting dby and dbx (1--1=2)\n dbydx = dby/(npix*dx) #gradient of By in x\n dbxdy = dbx/(npix*dy) #gradient of Bx in y\n u_0 = constants.mu_0 * (u.tesla*u.m/u.amp) #constant\n #Compute Jz\n jz = ((dbydx-dbxdy)/u_0).decompose() #compute Jz and simplify units\n unit = jz.unit #take unit of Jz\n jz[np.isnan(jz)] = 0 #change nans in Jz to zeroes\n #mask current where transverse field is weak\n if mask:\n mag_tr = np.sqrt(tmap.data**2 + pmap.data**2) #transverse field magnitude\n mask = mag_tr<=tr_cutoff #find pixels below threshold\n jz[mask] = 0 #set masked pixels to 0\n #format as an array with correct units\n jz = np.asarray(jz) * unit\n #Return the Jz array\n return jz",
"def forward_step_transpose_endo_2d(D, x_i, y_i, x_pi, y_pi):\n nZ, nX, nY = D.shape\n Dnew = np.empty_like(D)\n for iz in range(nZ):\n for ix in range(nX):\n for iy in range(nY):\n ixp = x_i[iz, ix, iy]\n iyp = y_i[iz, ix, iy]\n alpha = x_pi[iz, ix, iy]\n beta = y_pi[iz, ix, iy]\n\n Dnew[iz, ix, iy] = (alpha * beta * D[iz, ixp, iyp] + alpha * (1-beta) * D[iz, ixp, iyp+1] +\n (1-alpha) * beta * D[iz, ixp+1, iyp] +\n (1-alpha) * (1-beta) * D[iz, ixp+1, iyp+1])\n return Dnew",
"def invert(self):\n d = det(self.a, self.b, self.d, self.e)\n return affine(self.e/d, -self.b/d,\n det(self.b, self.c, self.e, self.f)/d,\n -self.d/d, self.a/d,\n -det(self.a, self.c, self.d, self.f)/d)",
"def _inverse_transform(self, i, x):\n inv_interp_func = interp1d(self.interp_func_[i].y, self.interp_func_[i].x, kind=self.interp_kind,\n copy=self.interp_copy, fill_value=self.fill_value)\n return inv_interp_func(erf(x))",
"def F(self,t,z,p):\n return 0.*z",
"def transpose(self):\n return lx.FunctionLinearOperator(self.fn_t, self.input_structure_t)",
"def invertQTransform(tr):\n try:\n det = tr.determinant()\n detr = 1.0 / det # let singular matrices raise ZeroDivisionError\n inv = tr.adjoint()\n inv *= detr\n return inv\n except ZeroDivisionError:\n return _pinv_fallback(tr)",
"def invert_s(F: xr.DataArray, value: Surface):\n\n val = value\n # Work on numpy arrays\n F0 = F.values\n # z_rho = F.z_rho.values\n # s_rho = F.s_rho.values\n val = np.asarray(val, dtype=\"float\")\n # Fshape = F.shape # Save original shape\n # if val.shape and val.shape != Fshape[1:]:\n # raise ValueError(\"z must be scalar or have shape = F.shape[1:]\")\n\n # Flatten all non-vertical dimensions\n N = F.shape[0] # Length of vertical dimension\n M = F0.size // N # Combined length of horizontal dimensions\n F0 = F0.reshape((N, M))\n if val.shape: # Value may be space dependent\n val = val.reshape((M,))\n\n # Look for highest s-value where G is negative\n G = (F0[1:, :] - val) * (F0[:-1, :] - val)\n G = G[::-1, :] # Reverse\n K = N - 1 - (G <= 0).argmax(axis=0)\n\n # Define D such that F[D][i] = F[K[i], i]\n I = np.arange(M)\n D = (K, I)\n Dm = (K - 1, I)\n\n # Compute interpolation weights\n a = (val - F0[Dm]) / (F0[D] - F0[Dm] + 1e-30)\n # Only use 0 <= a <= 1\n a[np.abs(a - 0.5) > 0.5] = np.nan #\n\n return D, Dm, a",
"def reflect_in_y(self):\n new_data = self.data.copy()\n\tnew_data = numpy.fliplr(new_data)\n return IFSMatrix(self.width, new_data)",
"def _get_flirt_xform_between_axes(from_nii, target_nii):\n\n to2tovox = np.linalg.inv(_get_sform(target_nii)[\"trans\"])\n fromvox2from = _get_sform(from_nii)[\"trans\"]\n\n from2to = to2tovox @ fromvox2from\n\n return from2to",
"def trans2d(*args):\n return _seb.trans2d(*args)",
"def transverse_field_matrix(p, state_table):\n if len(p['hx']) != p['N']:\n warnings.warn('hx array not commensurate with system size!')\n\n dim = len(state_table)\n row = []\n col = []\n data = []\n\n for In in range(dim):\n state = int_to_state(p, state_table[In])\n\n # iterate through the chain and flip each spin with application of X\n for i in range(len(state)):\n outstate = copy.deepcopy(state)\n\n # flip local spin (X)\n outstate[i] = 0 if outstate[i] else 1\n\n # get new state number\n Out = state_to_int(p, outstate)\n\n # get matrix element\n matrixelement = -1.0 * p['hx'][i]\n\n # store matrix element\n row.append(Out)\n col.append(In)\n data.append(matrixelement)\n\n del matrixelement\n\n transverse_field = sparse.csr_matrix((data, (row, col)),\n shape=(dim, dim), dtype=complex)\n return transverse_field",
"def polyFlipEdge():\n pass",
"def U(N,P,mode,M):\t\n\tM_valeurs = np.copy(M)\n\tM_valeurs = np.flipud(M_valeurs)\n\tF_m = F( np.array(M_valeurs) )\n\tB = B_mat(N,mode)\n\tretour = []\n\tretour.append(U_T(N))\n\tfor i in range(P-1):\n\t\tsuivant = U_suivant(N,P,B,retour[-1],F_m[i+1])\n\t\tretour.append(suivant)\n\tretour = np.flipud(retour)\n\treturn retour"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
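A quick shape check for the JTtoF row above; an illustrative sketch that relies only on the UTPM(ndarray) constructor and the shapes stated in the docstring, with algopy assumed as the provider of UTPM.

import numpy
from algopy import UTPM                  # assumption: an AlgoPy-style UTPM class

D, P, shp = 2, 3, (4,)
x = UTPM(numpy.zeros((D, 1, P) + shp))   # x.data.shape == (D, 1, P) + shp
y = x.JTtoF()
assert y.data.shape == (D + 1, P) + shp  # first D coefficient blocks copied, highest degree left zero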
Returns a new UTPM instance with the same data. `clone` differs from `copy` and `deepcopy` in that it calls the __init__ function. | def clone(self):
    return UTPM(self.data.copy()) | [
"def create_copy(self):\n print('WARNING: Implementation and testing still in progress!!!!')\n\n new_obj = self.__class__()\n new_obj.data = copy.deepcopy(self.data)\n new_obj.topography = copy.deepcopy(self.topography)\n new_obj.electrode_positions = copy.deepcopy(\n self.electrode_positions)\n\n # what about the log?\n print('WARNING: Journal and log is not copied!')\n\n return new_obj",
"def clone(self, data):",
"def copy(self):\n new_tirp = TIRP()\n new_tirp._symbols = copy(self._symbols)\n new_tirp._label=self._label\n new_tirp._tirp_matrix = self._tirp_matrix.copy()\n for entity_id in self._supporting_sequences_by_entity.keys():\n new_tirp._supporting_sequences_by_entity[entity_id] = deepcopy(self._supporting_sequences_by_entity[entity_id])\n for entity_id in self._Artemis_by_entity.keys():\n new_tirp._Artemis_by_entity[entity_id] = deepcopy(self._Artemis_by_entity[entity_id])\n return new_tirp",
"def copy(self) -> \"adsk::core::Ptr< adsk::core::Torus >\" :\n return _core.Torus_copy(self)",
"def clone(self):\n return self.__clone(True)",
"def clone(self, deep=False):\n inst = self.__class__()\n inst.number, inst.singular, inst.plural = self.number, self.singular, self.plural\n inst.template_map = self.template_map.copy() if deep else self.template_map\n return inst",
"def Clone(self) -> \"itkImageUS2_Pointer\":\n return _itkImagePython.itkImageUS2_Clone(self)",
"def hollow_copy(self):\n new_tirp = TIRP()\n new_tirp._symbols = copy(self._symbols)\n new_tirp._label=self._label\n new_tirp._name = self._name\n new_tirp._tirp_matrix = self._tirp_matrix.copy()\n\n return new_tirp",
"def copy(self):\n from copy import deepcopy\n\n outdata = UVData()\n\n if self.array is not None:\n outdata.array = self.array.copy()\n\n if self.source is not None:\n outdata.source = self.source.copy()\n\n if self.freq is not None:\n outdata.freq = self.freq.copy()\n\n if self.data is not None:\n outdata.vis = self.data.copy()\n\n if self.antable is not None:\n outdata.antable = self.antable.copy()\n\n if self.gaintable is not None:\n outdata.gaintable = self.gaintable.copy()\n\n outdata.flags = deepcopy(self.flags)\n outdata.stokestype = deepcopy(self.stokestype)\n\n return outdata",
"def Clone(self) -> \"itkImageUC2_Pointer\":\n return _itkImagePython.itkImageUC2_Clone(self)",
"def copy(self):\n if self.data is not None:\n _data = self.data.copy()\n else:\n _data = None\n return self.__class__(data=_data, header=self.header.copy())",
"def copy(self):\n\t\treturn Account(self._init, self._option_posi, self.option, self._one_side_cost)",
"def copy(self):\n return Polynomial(self)",
"def clone(self) -> \"ScXMLElt *\":\n return _coin.ScXMLElt_clone(self)",
"def copy(self):\n cp = Entity()\n cp.genotype = self.genotype.copy()\n cp.fitness = self.fitness\n\n return cp",
"def Clone(self) -> \"itkImageUS3_Pointer\":\n return _itkImagePython.itkImageUS3_Clone(self)",
"def clone(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return ExponentialDistBase.clone(self)",
"def Clone(self) -> \"itkImageUL2_Pointer\":\n return _itkImagePython.itkImageUL2_Clone(self)",
"def clone(self):\n poly = self.polydata(False)\n polyCopy = vtk.vtkPolyData()\n polyCopy.DeepCopy(poly)\n\n cloned = Mesh(polyCopy)\n pr = vtk.vtkProperty()\n pr.DeepCopy(self.GetProperty())\n cloned.SetProperty(pr)\n\n # assign the same transformation to the copy\n cloned.SetOrigin(self.GetOrigin())\n cloned.SetScale(self.GetScale())\n cloned.SetOrientation(self.GetOrientation())\n cloned.SetPosition(self.GetPosition())\n\n cloned._mapper.SetScalarVisibility(self._mapper.GetScalarVisibility())\n cloned._mapper.SetScalarRange(self._mapper.GetScalarRange())\n cloned._mapper.SetColorMode(self._mapper.GetColorMode())\n lsr = self._mapper.GetUseLookupTableScalarRange()\n cloned._mapper.SetUseLookupTableScalarRange(lsr)\n cloned._mapper.SetScalarMode(self._mapper.GetScalarMode())\n lut = self._mapper.GetLookupTable()\n if lut:\n cloned._mapper.SetLookupTable(lut)\n\n cloned.base = self.base\n cloned.top = self.top\n cloned.name = self.name\n if self.trail:\n n = len(self.trailPoints)\n cloned.addTrail(self.trailOffset, self.trailSegmentSize*n, n,\n None, None, self.trail.GetProperty().GetLineWidth())\n if self.shadow:\n cloned.addShadow(self.shadowX, self.shadowY, self.shadowZ,\n self.shadow.GetProperty().GetColor(),\n self.shadow.GetProperty().GetOpacity())\n return cloned",
"def clone(self):\n # First performing a deep copy of the vector\n vec_clone = deepcopy(self)\n if vec_clone.vecfile is None:\n # Creating header and binary files from vector space\n # Placing temporary file into datapath folder\n tmp_vec = sep.datapath + \"clone_tmp_vector\" + str(int(time() * 1000000)) + \".H\"\n axis_file = \"\"\n for iaxis, naxis in enumerate(tuple(reversed(vec_clone.shape))):\n axis_file += \"n%s=%s \" % (iaxis + 1, naxis)\n # Creating temporary vector file\n cmd = \"Spike %s | Add scale=0.0 > %s\" % (axis_file, tmp_vec)\n RunShellCmd(cmd, get_stat=False, get_output=False)\n vec_clone.vecfile = tmp_vec\n vec_clone.binfile = sep.get_binary(vec_clone.vecfile)\n else:\n # Creating a temporary file with similar name but computer time at the end\n tmp_vec = self.vecfile.split(\".H\")[0].split(\"/\")[-1] # Getting filename only\n # Placing temporary file into datapath folder\n tmp_vec = sep.datapath + tmp_vec + \"_clone_\" + str(int(time() * 1000000)) + \".H\"\n tmp_bin = tmp_vec + \"@\"\n # Copying header and binary files and setting pointers to new file\n copyfile(self.vecfile, tmp_vec) # Copying header\n copyfile(self.binfile, tmp_bin) # Copying binary\n vec_clone.vecfile = tmp_vec\n vec_clone.binfile = tmp_bin\n # \"Fixing\" header file\n with open(vec_clone.vecfile, \"a\") as fid:\n fid.write(\"in='%s\\n'\" % tmp_bin)\n # By default the clone file is going to be removed once the vector is deleted\n vec_clone.remove_file = True\n return vec_clone"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
initializes this UTPM instance to compute the Jacobian; the dtype can be forced to a certain dtype, and if no dtype is provided it is inferred from x | def init_jacobian(cls, x, dtype=None):
    x = numpy.asarray(x)
    if dtype==None:
        # try to infer the dtype from x
        dtype= x.dtype
    if dtype==int:
        dtype=float
    shp = numpy.shape(x)
    data = numpy.zeros(numpy.hstack( [2, numpy.size(x), shp]), dtype=dtype)
    data[0] = x
    data[1,:].flat = numpy.eye(numpy.size(x))
    return cls(data) | [
"def jacobian(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass",
"def init_jac_vec(cls, x, v, dtype=None):\n\n x = numpy.asarray(x)\n\n if dtype==None:\n # try to infer the dtype from x\n dtype= x.dtype\n\n if dtype==int:\n dtype=float\n\n\n shp = numpy.shape(x)\n data = numpy.zeros(numpy.hstack( [2, 1, shp]), dtype=dtype)\n data[0,0] = x\n data[1,0] = v\n return cls(data)",
"def jacobian(self, x):\n return self.jnz",
"def __init__(self, xt = None, yt = None, **kwds):\n super().__init__(**kwds)\n self.atrans = a_trans.initialize(numpy.ascontiguousarray(xt, dtype = numpy.float64),\n numpy.ascontiguousarray(yt, dtype = numpy.float64))",
"def prepareJacobian(self):\n self.jac.clear()\n self.nm = self.regionManager().parameterCount()\n self.nf = len(self.fops)\n print(self.nm, \"model cells\")\n nd = 0\n for i, fop in enumerate(self.fops):\n self.jac.addMatrix(fop.jacobian(), nd, i*self.nm)\n nd += fop.data.size()\n\n self.jac.recalcMatrixSize()\n self.setJacobian(self.jac)",
"def __init__(self, x_0, t_0):\n # set the model parameters for the observer\n self.A = matrix([[-0.01546814,0.00639784],\n [0.03924884,-0.03924884]])\n self.B = matrix([[5.71428571429e-3],[0]])\n self.C = matrix([[0,1]])\n self.L = matrix([[1],[0.2]])\n\n self.x = x_0\n self.t_prev = t_0",
"def Jacobian(self,t,y):\n return -self.lambd",
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def __init__(self, dim=2, num_basis_functions=25,\n alpha_x=1., dt=0.01, max_time=1., tau=1.,\n alpha_y=25., beta_y=6.25, weight_scale=1e3,\n dtype=torch.float32):\n super().__init__()\n\n self.dim = dim\n\n \"\"\" Time values \"\"\"\n self.x = torch.tensor(1., dtype=dtype) # Internal time counter\n self.tau = torch.tensor(tau, dtype=dtype)\n self.alpha_x = alpha_x\n self.dt = dt\n self.max_time = max_time\n\n \"\"\" Basis functions PSI \"\"\"\n self.c = torch.exp(-self.alpha_x * torch.linspace(0., max_time, num_basis_functions, dtype=dtype))\n self.h = torch.ones(num_basis_functions, dtype=dtype) * num_basis_functions / self.c\n\n \"\"\" Goal and weights parameters \"\"\"\n self.g = torch.nn.Parameter(torch.ones(dim, dtype=dtype))\n self.weights = torch.nn.Parameter(torch.ones((dim, num_basis_functions), dtype=dtype) / num_basis_functions)\n self.weight_scale = torch.tensor(weight_scale, dtype=dtype)\n\n \"\"\" PD controller setup for the trajectory following. \"\"\"\n self.alpha_y = torch.tensor(alpha_y, dtype=dtype)\n self.beta_y = torch.tensor(beta_y, dtype=dtype)",
"def BuildJacobianMatrix(self): \r\n hf=self.hf\r\n ha=self.ha\r\n \r\n TMP_NumProb=copy.deepcopy(self)\r\n \r\n \r\n self.Ytmp[:]=self.Ynp1[:]\r\n for i in range(self.NbVariables):\r\n # Construction du dY\r\n dh=(2.0*hf)*self.Ytmp[i]+2.0*ha\r\n \r\n self.Ytmp[i]=((1.0-hf)*self.Ytmp[i]-ha)\r\n self.BuildFluxFunction(self.Ytmp) \r\n self.Flux_m1[:]=self.Flux_TMP[:]\r\n \r\n self.Ytmp[i]=self.Ytmp[i]+dh\r\n self.BuildFluxFunction(self.Ytmp)\r\n self.Flux_p1[:]=self.Flux_TMP[:]\r\n inv_dY=1.0/dh\r\n self.JacobianMatrix[:,i]=(self.Flux_p1[:]-self.Flux_m1[:])*inv_dY\r\n self.Ytmp[i]=self.Ynp1[i]",
"def init_input(self):\n m1 = np.diagflat([-1] * (self.n - 1), -1)\n m2 = np.diagflat([-1] * (self.n - 1), 1)\n m3 = np.diagflat([self.gamma] * self.n)\n self.A = np.matrix((m1 + m2 + m3).astype(np.double))\n\n self.b = np.matrix(\n np.full((self.n, 1), self.gamma - 2).astype(np.double)\n )\n self.b[0] = self.gamma - 1\n self.b[self.n - 1] = self.gamma - 1\n\n self.x0 = np.matrix(\n np.full((self.n, 1), 0).astype(np.double)\n )",
"def jacobianstructure(self):\n return self.jstruct",
"def __transpose(self):\n if len(self.jacobian) == 0:\n print('First compute the jacobian')\n else:\n dtheta = []\n maxAngleChange = 30*np.pi/180\n \n e = self.target-self.end_effector.getPosition(frame=self.currentframe) #Distance\n# target = self.target\n# eepos = self.end_effector.getPosition(frame=self.currentframe)\n# jacobian = np.asarray(self.jacobian)\n dtheta2 = np.dot(self.jacobian.T, e)\n j_jt_e = np.dot(np.dot(self.jacobian, self.jacobian.T), e)\n alpha = np.dot(e,j_jt_e)/np.dot(j_jt_e,j_jt_e)\n beta = maxAngleChange/np.linalg.norm(dtheta2)\n dtheta2 = dtheta2*np.min([alpha,beta])*180/np.pi\n for i in range(0,self.jacobian.shape[1]):\n #i = current rotation axis being used\n j_jt_e = np.dot(np.dot(self.jacobian[:,i], self.jacobian[:,i].T), e)\n alpha = np.dot(e,j_jt_e)/np.dot(j_jt_e,j_jt_e)\n beta = maxAngleChange\n dtheta.append(alpha*np.dot(self.jacobian[:,i].T, e)*180/np.pi)\n# self.dtheta = dtheta[:]\n self.dtheta=dtheta2",
"def __init__(self, n_var: int, order: int, var_names: List[str]) -> None:\n TaylorExpansAbstract.__init__(self, n_var, order, np.float64, var_names)",
"def __init__(self, X, Y, X_prime):\n X_concat = np.vstack([X, X_prime])\n X_concat = DataHolder(X_concat, on_shape_change='recompile')\n Y = DataHolder(Y, on_shape_change='recompile')\n UnimodalGP.__init__(self)\n self.X_concat = DataHolder(X_concat)\n self.Y = DataHolder(Y)\n self.X = DataHolder(X)\n self.X_prime = DataHolder(X_prime)\n self.num_data = X_concat.shape[0]\n self.num_x_points = X.shape[0]\n self.num_der_points = X_prime.shape[0]\n self.num_latent = Y.shape[1]\n \n self.Vf = Param(np.zeros((self.num_data, self.num_latent)))\n self.Vf.prior = Gaussian(0., 1.)\n \n self.Vg = Param(np.zeros((2*self.num_der_points, self.num_latent)))\n self.Vg.prior = Gaussian(0., 1.)",
"def jacobian(Q, d):\n return zeros([n, n])",
"def test_dtype_jax(self, dtype1, dtype2):\n import jax\n from jax.config import config\n\n config.update(\"jax_enable_x64\", True)\n\n dtype = dtype1\n dtype1 = getattr(jax.numpy, dtype1)\n dtype2 = getattr(jax.numpy, dtype2)\n\n tangent = jax.numpy.array([1], dtype=dtype1)\n jac = tuple([jax.numpy.array(1, dtype=dtype2), jax.numpy.array([1, 1], dtype=dtype2)])\n assert qml.gradients.compute_jvp_multi(tangent, jac)[0].dtype == dtype",
"def jacobian(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=np.object)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n return np.vstack(\n [y.derivatives() for y in y_ad.flat]).reshape(y_ad.shape + (-1,))",
"def __init__(\n self,\n hilbert: Fock,\n graph: AbstractGraph,\n U: float,\n V: float = 0.0,\n J: float = 1.0,\n mu: float = 0.0,\n dtype: Optional[DType] = None,\n ):\n assert (\n graph.n_nodes == hilbert.size\n ), \"The size of the graph must match the hilbert space.\"\n\n assert isinstance(hilbert, Fock)\n super().__init__(hilbert)\n\n if dtype is None:\n dtype = jnp.promote_types(_dtype(U), _dtype(V))\n dtype = jnp.promote_types(dtype, _dtype(J))\n dtype = jnp.promote_types(dtype, _dtype(mu))\n dtype = jnp.promote_types(float, dtype)\n dtype = np.empty((), dtype=dtype).dtype\n self._dtype = dtype\n\n self._U = np.asarray(U, dtype=dtype)\n self._V = np.asarray(V, dtype=dtype)\n self._J = np.asarray(J, dtype=dtype)\n self._mu = np.asarray(mu, dtype=dtype)\n\n self._n_max = hilbert.n_max\n self._n_sites = hilbert.size\n self._edges = np.asarray(list(graph.edges()))\n self._max_conn = 1 + self._edges.shape[0] * 2\n self._max_mels = np.empty(self._max_conn, dtype=self.dtype)\n self._max_xprime = np.empty((self._max_conn, self._n_sites))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
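The row above is one half of the usual forward-mode Jacobian driver; a minimal sketch of the pattern follows. The test function g and all values are hypothetical, algopy is assumed to provide UTPM, and the first-order coefficients are read off data directly (AlgoPy also ships an extract_jacobian companion driver, which is not part of this dump).

import numpy
from algopy import UTPM        # assumption: an AlgoPy-style UTPM class

def g(x):                      # hypothetical scalar test function, R^2 -> R
    return x[0]*x[1] + x[1]**2

x0 = numpy.array([3.0, 5.0])
ux = UTPM.init_jacobian(x0)    # one seed direction per input component
uy = g(ux)
grad = uy.data[1]              # first-order coefficients = gradient [x1, x0 + 2*x1] = [5., 13.]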
initializes this UTPM instance to compute the Jacobian vector product J v; the dtype can be forced to a certain dtype, and if no dtype is provided it is inferred from x | def init_jac_vec(cls, x, v, dtype=None):
    x = numpy.asarray(x)
    if dtype==None:
        # try to infer the dtype from x
        dtype= x.dtype
    if dtype==int:
        dtype=float
    shp = numpy.shape(x)
    data = numpy.zeros(numpy.hstack( [2, 1, shp]), dtype=dtype)
    data[0,0] = x
    data[1,0] = v
    return cls(data) | [
"def init_jacobian(cls, x, dtype=None):\n\n x = numpy.asarray(x)\n\n if dtype==None:\n # try to infer the dtype from x\n dtype= x.dtype\n\n if dtype==int:\n dtype=float\n\n\n shp = numpy.shape(x)\n data = numpy.zeros(numpy.hstack( [2, numpy.size(x), shp]), dtype=dtype)\n data[0] = x\n data[1,:].flat = numpy.eye(numpy.size(x))\n\n return cls(data)",
"def jacobian(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass",
"def get_j_vect(self):\n\t\tresult = self.vect_j.mult_scalar(self.scale)\n\t\treturn result",
"def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorUC_swiginit(self,_vnl_vectorPython.new_vnl_vectorUC(*args))",
"def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorUS_swiginit(self,_vnl_vectorPython.new_vnl_vectorUS(*args))",
"def jacobian(self, x):\n return self.jnz",
"def __init__(self, xt = None, yt = None, **kwds):\n super().__init__(**kwds)\n self.atrans = a_trans.initialize(numpy.ascontiguousarray(xt, dtype = numpy.float64),\n numpy.ascontiguousarray(yt, dtype = numpy.float64))",
"def __init__(self, *args):\n _ida_pro.uval_array_swiginit(self, _ida_pro.new_uval_array(*args))",
"def __init__(self, x_0, t_0):\n # set the model parameters for the observer\n self.A = matrix([[-0.01546814,0.00639784],\n [0.03924884,-0.03924884]])\n self.B = matrix([[5.71428571429e-3],[0]])\n self.C = matrix([[0,1]])\n self.L = matrix([[1],[0.2]])\n\n self.x = x_0\n self.t_prev = t_0",
"def __init__(\n self,\n hilbert: Fock,\n graph: AbstractGraph,\n U: float,\n V: float = 0.0,\n J: float = 1.0,\n mu: float = 0.0,\n dtype: Optional[DType] = None,\n ):\n assert (\n graph.n_nodes == hilbert.size\n ), \"The size of the graph must match the hilbert space.\"\n\n assert isinstance(hilbert, Fock)\n super().__init__(hilbert)\n\n if dtype is None:\n dtype = jnp.promote_types(_dtype(U), _dtype(V))\n dtype = jnp.promote_types(dtype, _dtype(J))\n dtype = jnp.promote_types(dtype, _dtype(mu))\n dtype = jnp.promote_types(float, dtype)\n dtype = np.empty((), dtype=dtype).dtype\n self._dtype = dtype\n\n self._U = np.asarray(U, dtype=dtype)\n self._V = np.asarray(V, dtype=dtype)\n self._J = np.asarray(J, dtype=dtype)\n self._mu = np.asarray(mu, dtype=dtype)\n\n self._n_max = hilbert.n_max\n self._n_sites = hilbert.size\n self._edges = np.asarray(list(graph.edges()))\n self._max_conn = 1 + self._edges.shape[0] * 2\n self._max_mels = np.empty(self._max_conn, dtype=self.dtype)\n self._max_xprime = np.empty((self._max_conn, self._n_sites))",
"def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorD_swiginit(self,_vnl_vectorPython.new_vnl_vectorD(*args))",
"def prepareJacobian(self):\n self.jac.clear()\n self.nm = self.regionManager().parameterCount()\n self.nf = len(self.fops)\n print(self.nm, \"model cells\")\n nd = 0\n for i, fop in enumerate(self.fops):\n self.jac.addMatrix(fop.jacobian(), nd, i*self.nm)\n nd += fop.data.size()\n\n self.jac.recalcMatrixSize()\n self.setJacobian(self.jac)",
"def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorF_swiginit(self,_vnl_vectorPython.new_vnl_vectorF(*args))",
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def __init__(self, *args):\n _ida_pro.longlongvec_t_swiginit(self, _ida_pro.new_longlongvec_t(*args))",
"def _vjp_func_jacobian(\n self, values: VariableData\n ) -> Tuple[FactorValue, \"VectorJacobianProduct\"]:\n from autofit.graphical.factor_graphs.jacobians import (\n VectorJacobianProduct,\n )\n raw_fval, fvjp = self._factor_vjp(*self.resolve_args(values))\n fval = self._factor_value(raw_fval)\n\n fvjp_op = VectorJacobianProduct(\n self.factor_out,\n fvjp,\n *self.args,\n out_shapes=fval.to_dict().shapes,\n )\n return fval, fvjp_op",
"def __init__(self, X, Y, X_prime):\n X_concat = np.vstack([X, X_prime])\n X_concat = DataHolder(X_concat, on_shape_change='recompile')\n Y = DataHolder(Y, on_shape_change='recompile')\n UnimodalGP.__init__(self)\n self.X_concat = DataHolder(X_concat)\n self.Y = DataHolder(Y)\n self.X = DataHolder(X)\n self.X_prime = DataHolder(X_prime)\n self.num_data = X_concat.shape[0]\n self.num_x_points = X.shape[0]\n self.num_der_points = X_prime.shape[0]\n self.num_latent = Y.shape[1]\n \n self.Vf = Param(np.zeros((self.num_data, self.num_latent)))\n self.Vf.prior = Gaussian(0., 1.)\n \n self.Vg = Param(np.zeros((2*self.num_der_points, self.num_latent)))\n self.Vg.prior = Gaussian(0., 1.)",
"def __init__(self, *args):\n _ida_pro.uintvec_t_swiginit(self, _ida_pro.new_uintvec_t(*args))",
"def __init__(self, x=0., y=0.):\n if hasattr(x, \"__getitem__\"):\n x, y = x\n self._v = [float(x), float(y)]\n else:\n self._v = [float(x), float(y)]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
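With a single seed direction the same pattern yields a Jacobian-vector product; a minimal sketch with a hypothetical test function and algopy assumed to provide UTPM.

import numpy
from algopy import UTPM        # assumption: an AlgoPy-style UTPM class

def g(x):                      # hypothetical scalar test function
    return x[0]*x[1] + x[1]**2

x0 = numpy.array([3.0, 5.0])
v = numpy.array([1.0, -1.0])
ux = UTPM.init_jac_vec(x0, v)  # a single direction (P = 1) seeded with v
uy = g(ux)
Jv = uy.data[1, 0]             # directional derivative J(x0) @ v = 5*1 + 13*(-1) = -8.0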
initializes this UTPM instance to compute the Hessian | def init_hessian(cls, x):
    x = numpy.ravel(x)
    # generate directions
    N = x.size
    M = (N*(N+1))//2  # number of seed directions; integer division keeps the numpy.zeros shape an int
    L = (N*(N-1))//2
    S = numpy.zeros((N,M), dtype=x.dtype)
    s = 0
    i = 0
    for n in range(1,N+1):
        S[-n:,s:s+n] = numpy.eye(n)
        S[-n,s:s+n] = numpy.ones(n)
        s+=n
        i+=1
    S = S[::-1].T
    data = numpy.zeros(numpy.hstack([3,S.shape]), dtype=x.dtype)
    data[0] = x
    data[1] = S
    return cls(data) | [
"def build_hessian(self,x,y):\n #Precalculate entries of hessian\n x_sum = np.sum(x)\n x_squared_sum = np.sum(x*x)\n y_sum = np.sum(y)\n y_squared_sum = np.sum(y*y)\n xy_sum = np.sum(x*y)\n n = len(x)\n\n hessian = np.array([\n [n,0,x_sum,y_sum,0,0],\n [0,n,0,0,x_sum,y_sum],\n [x_sum,0,x_squared_sum,xy_sum,0,0],\n [y_sum,0,xy_sum,y_squared_sum,0,0],\n [0,x_sum,0,0,x_squared_sum,xy_sum],\n [0,y_sum,0,0,xy_sum,y_squared_sum]\n ])\n return hessian",
"def _init_h(self):\n self.H = np.random.random((self._num_bases, self._num_samples)) + 0.2",
"def hess(self, x, t, hdata):\n\n # Check arguments for consistency\n errstring = self.consist('mlp', x, t)\n if errstring != None:\n raise Exception(errstring)\n\n if computeData:\n # Data term in Hessian needs to be computed\n hdata = self.datahess(x, t)\n\n h, hdata = self.hbayes(hdata)\n\n # Sub-function to compute data part of Hessian",
"def SetTemperatureField(self, T, H):\n self.privateTemperature = T\n self.privateField = H\n J = 1.\t\t# Convention: also float to avoid int problems\n # Set up heat bath algorithm lookup table\n self.heatBathProbUp = scipy.zeros(5, float)\n for nUp in range(0,5):\t# Four neighbors on square lattice\n sumNbrs = 2*(nUp-2)\t# Sum of spins of neighbors\n eUp = -J * sumNbrs - H\n eDown = J * sumNbrs + H\n if T != 0:\n boltzUp = scipy.exp(-eUp/T) \n boltzDown = scipy.exp(-eDown/T) \n self.heatBathProbUp[nUp] = boltzUp/(boltzUp+boltzDown)\n else:\n if eUp>0:\n self.heatBathProbUp[nUp]=0.\n elif eUp<0:\n self.heatBathProbUp[nUp]=1.\n else:\n self.heatBathProbUp[nUp]=0.5\n # Set up Metropolis algorithm lookup table\n self.MetropolisProbUp = scipy.zeros((2,5), float)\n for nUp in range(0,5):\t# Four neighbors on square lattice\n sumNbrs = 2*(nUp-2)\t# Sum of spins of neighbors\n eUp = -J * sumNbrs - H\n eDown = J * sumNbrs + H\n\t if T != 0:\n\t if eDown > eUp: # Down spin unstable\n\t\t # If current spin is down, flip up\n\t\t self.MetropolisProbUp[0,nUp]=1.\n\t\t # If current spin is up, flip down with prob e^(-|dE|/T)\n\t\t self.MetropolisProbUp[1,nUp]= 1.-scipy.exp(-(eDown-eUp)/T)\n\t\telse: # Up spin unstable\n\t\t # If current spin is down, flip up with prob e^(-|dE|/T)\n\t\t self.MetropolisProbUp[0,nUp]= scipy.exp(-(eUp-eDown)/T)\n\t\t # If current spin is up, flip down\n\t\t self.MetropolisProbUp[1,nUp]= 0.\n\t else:\n\t if eDown > eUp: # Down spin unstable\n\t\t # If current spin is down, flip up\n\t\t self.MetropolisProbUp[0,nUp]=1.\n\t\t # If current spin is up, leave alone\n\t\t self.MetropolisProbUp[1,nUp]= 0.\n\t\telif eDown < eUp: # Up spin unstable\n\t\t # If current spin is down, leave alone\n\t\t self.MetropolisProbUp[0,nUp]= 0.\n\t\t # If current spin is up, flip down\n\t\t self.MetropolisProbUp[1,nUp]= 1.\n # Set up Wolff algorithm\n\tif T==0:\n\t self.p = 0.\n else:\n\t self.p = 1.0 - scipy.exp(-2.*J/T)",
"def __init__(self, node_property, w_uij, delta_tb=1,\n alpha=10, lam=(1.3, -0.5), bl=0, delta_tc=2,\n N=100, Q=None, R=None,\n sp_input=None):\n self.fc = FishCalcium(node_property=node_property, w_uij=w_uij, delta_tb=delta_tb,\n alpha=alpha, lam=lam, bl=bl, delta_tc=delta_tc,\n sp_input=sp_input)\n\n self.t_ratio = int(delta_tc / delta_tb)\n\n self.x = self.obtain_vars(self.fc)\n self.dim_x = self.x.shape[0]\n self.dim_z = self.fc.calciumAR.flu.shape[0]\n\n self.N = N\n\n if Q is None:\n self.Q = torch.eye(self.dim_x) # process uncertainty\n else:\n self.Q = Q\n\n if R is None:\n self.R = torch.eye(self.dim_z) # measurement uncertainty\n else:\n self.R = R\n\n self.sp_input = sp_input\n\n self.K = torch.zeros((self.dim_x, self.dim_z)) # kalman gain\n self.S = torch.zeros((self.dim_z, self.dim_z)) # system uncertainty\n self.SI = torch.zeros((self.dim_z, self.dim_z)) # inverse of system uncertainty\n\n self.__mean = torch.zeros(self.dim_x) # as 1D tensor for sampling\n self.__meanz = torch.zeros(self.dim_z) # as 1D tensor for sampling",
"def __init__(self,ham,wfn,ci_basis_set): \n self.assign_hamiltonian(ham)\n self.assign_wavefunction(wfn)\n self.assign_ci_basis_set(ci_basis_set)\n self.assign_integral(ham,wfn)",
"def initialize_PSD(self):\n self.W_noise_NnFK = self.xp.random.rand(self.n_noise, self.n_freq, self.n_basis_noise).astype(self.TYPE_FLOAT)\n self.H_noise_NnKT = self.xp.random.rand(self.n_noise, self.n_basis_noise, self.n_time).astype(self.TYPE_FLOAT)\n\n self.U_F = self.xp.ones(self.n_freq, dtype=self.TYPE_FLOAT) / self.n_freq\n self.V_T = self.xp.ones(self.n_time, dtype=self.TYPE_FLOAT)\n\n power_observation_FT = (self.xp.abs(self.X_FTM) ** 2).mean(axis=2)\n if self.normalize_encoder_input:\n power_observation_FT = power_observation_FT / power_observation_FT.sum(axis=0).mean()\n\n self.Z_speech_DT = self.speech_VAE.encode_cupy(power_observation_FT.astype(self.xp.float32))\n self.z_link_speech = Z_link(self.Z_speech_DT.T)\n self.z_optimizer_speech = chainer.optimizers.Adam().setup(self.z_link_speech)\n self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)\n\n self.lambda_NFT = self.xp.zeros([self.n_source, self.n_freq, self.n_time], dtype=self.TYPE_FLOAT)\n self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT\n self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT",
"def _compute_H_MAP(self, verbose=False):\n \n # compute _theta_MAP if necessary\n if self._theta_MAP is None: self._compute_theta_MAP(verbose)\n \n tic = time.clock()\n self._H_MAP = hessian_MAP_KJMA(theta = self._theta_MAP, param = self._param)\n if verbose: print \"H_MAP computed in {}s\".format(time.clock()-tic)",
"def __init__(self, wh_p=4.5, ua=0.0019678, eta=1, t2=49, d=0.55, h=1.0):\n self.WH_P = wh_p #[kJ/sec] rated power of water heater\n self.UA = ua #[kJ/(sec C)] = 7.084 kJ/hr-C\n self.eta_c = eta #[none] recovery efficiency\n self.T2 = t2 #[C] initial temperature of top node in Celsius\n self.diameter = d #[m] tank diameter in meters\n self.height = h #[m] tank height in meters\n self.T1 = self.T2 - 1 #[C] bottom node temperature\n self.Cp = 4.1818 #[kJ/(kg C)] heat capacity of water\n self.D = 1000 #[kg/m^3] density of water\n self.volume = self.height * np.pi * (self.diameter / 2)**2 #[m^3]\n self.S_top = 0.25 * np.pi * self.diameter**2 #[m^2] top area\n self.S_side = np.pi * self.diameter * self.height #[m^2] side area\n self.S_total = self.S_top * 2 + self.S_side #[m^2] total area\n self.UA1 = self.UA*(self.S_top+(2./3.)*self.S_side)/self.S_total #bottom UA\n self.UA2 = self.UA*(self.S_top+(1./3.)*self.S_side)/self.S_total #top UA\n self.C1 = self.volume * (2./3.) * self.D * self.Cp #bottom\n self.C2 = self.volume * (1./3.) * self.D * self.Cp #top\n self.phi, self.gamma = None, None",
"def __init__(self, **kwargs):\n super().__init__()\n self.activation=torch.nn.Tanh()\n self.layers=torch.nn.ModuleList()\n\n self.set_hyperparams(**kwargs)\n num_units=self.hypers['num_units']\n num_layers=self.hypers['num_layers']\n in_dim=self.hypers['in_dim']\n out_dim=self.hypers['out_dim']\n self.set_reynolds_stress_fn()\n\n activation=self.hypers['activation']\n if activation == 'swish':\n self.activation=utils.Swish()\n\n # build architecture\n self.layers.append(torch.nn.Linear(in_dim, num_units)) # input layer\n for i in range(num_layers):\n self.layers.append(torch.nn.Linear(num_units, num_units)) # hidden layer\n self.layers.append(torch.nn.Linear(num_units, out_dim)) # output layer",
"def __init__(self, N=10, T=2./scipy.log(1.+scipy.sqrt(2.)), \n H=0., seed=1):\n if seed==None:\n\t scipy.random.seed()\n\telse:\n scipy.random.seed(seed)\n self.lattice = scipy.random.random_integers(0,1,(N,N))\n self.SetTemperatureField(T, H)\n self.N = N",
"def __init__(self):\n \n self.enh_lib = enhancement\n self.enh = None \n\n self.height = 1.e-2\n # height (m) of coolant duct\n self.mdot = 1.0\n # mass flow rate (kg/s) of coolant\n self.ducts = 2 # number of coolant ducts per hot duct\n self.geometry = 'parallel plates'\n self.c_p = 4.179\n # Specific heat (kJ/kg*K) of water at 325K \n self.mu = 5.3e-4\n # viscosity of water at 325K (Pa*s), WolframAlpha\n self.k = 0.646e-3\n # thermal conductivity of water at 325K (kW/m*K) through\n # cooling duct \n self.Pr = (7.01 + 5.43)/2 # Prandtl # of water from Engineering\n # Toolbox\n self.rho = 1000.\n # density (kg/m**3) of water\n self.Nu_coeff = 0.023\n self.enthalpy0 = 113.25\n # enthalpy (kJ/kg) of coolant at restricted dead state\n self.entropy0 = 0.437\n # entropy (kJ/kg*K) of coolant at restricted dead state\n self.sides = 1\n \n functions.bind_functions(self)",
"def setup(self):\n d = self._setup_convergences()\n return d.addErrback(self.log.err, \"selfheal-setup-err\")",
"def __init__(self, num_qubits: int):\n self.__n = num_qubits\n self.__N = 2 ** self.n\n self._H = scp.sparse.csr_matrix((self.N, self.N))",
"def _hash_init(self):\r\n # Initialize the indices and data dependencies.\r\n self.rotor = 1\r\n self.ratchet = 3\r\n self.avalanche = 5\r\n self.last_plain = 7\r\n self.last_cipher = 11\r\n\r\n # Start with cards all in inverse order.\r\n self.cards = list(range(255, -1, -1))",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def _set_HU(self):\r\n # The Hamiltonian H is proportional to the\r\n # time derivative of U times its inverse\r\n self._HU = (0.5*1.0j*self.hbar/self.dt)*(self.U - np.conj(self.U.T))\r\n # self._HU =(1.0j*self.hbar/self.dt)*(self.U - self.id)\r",
"def initialize(self):\n # self.gc1.reset_parameters()\n # self.gc2.reset_parameters()\n nn.init.xavier_uniform_(self.gc2.fc.weight)",
"def __init__(self) -> None:\n self._prepare_minions()\n self._m3u_dict = {}\n self._encrypted = False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
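For intuition about the direction matrix S built above, a small worked case (illustrative only, with algopy assumed to provide UTPM): for N = 3 inputs the loop creates M = N*(N+1)/2 = 6 seed directions, which after the final reverse-and-transpose are the unit vectors and their pairwise sums; the extract_hessian row that follows recovers second derivatives from exactly these directions via a polarization identity.

import numpy
from algopy import UTPM        # assumption: an AlgoPy-style UTPM class

ux = UTPM.init_hessian(numpy.zeros(3))
print(ux.data[1])              # (6, 3) seed matrix, one direction per row:
                               # e1, e2, e1+e2, e3, e2+e3, e1+e3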
extracts the Hessian of shape (N,N) from the UTPM instance y | def extract_hessian(cls, N, y, as_full_matrix = True, use_mpmath=False):
    if use_mpmath:
        import mpmath
        mpmath.mp.dps = 50  # set the mpmath working precision (assigning plain mpmath.dps has no effect)
    H = numpy.zeros((N,N),dtype=y.data.dtype)
    for n in range(N):
        for m in range(n):
            a = sum(range(n+1))
            b = sum(range(m+1))
            k = sum(range(n+2)) - m - 1
            #print 'k,a,b=', k,a,b
            if n!=m:
                if use_mpmath:
                    tmp = (mpmath.mpf(y.data[2,k]) - mpmath.mpf(y.data[2,a]) - mpmath.mpf(y.data[2,b]))
                else:
                    tmp = (y.data[2,k] - y.data[2,a] - y.data[2,b])
                H[m,n]= H[n,m]= tmp
        a = sum(range(n+1))
        H[n,n] = 2*y.data[2,a]
    return H | [
"def build_hessian(self,x,y):\n #Precalculate entries of hessian\n x_sum = np.sum(x)\n x_squared_sum = np.sum(x*x)\n y_sum = np.sum(y)\n y_squared_sum = np.sum(y*y)\n xy_sum = np.sum(x*y)\n n = len(x)\n\n hessian = np.array([\n [n,0,x_sum,y_sum,0,0],\n [0,n,0,0,x_sum,y_sum],\n [x_sum,0,x_squared_sum,xy_sum,0,0],\n [y_sum,0,xy_sum,y_squared_sum,0,0],\n [0,x_sum,0,0,x_squared_sum,xy_sum],\n [0,y_sum,0,0,xy_sum,y_squared_sum]\n ])\n return hessian",
"def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n return hessian_approximation(self.f, x)",
"def getHessian(fgradient):\n def hess(x):\n return evaluateHessian(fgradient,x)\n return hess",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def update_gradient_hessian(self, X, y, sample_weight):",
"def hessian(y, xs):\n y = tf.convert_to_tensor(y)\n dependencies = [tf.verify_tensor_all_finite(y, msg='')]\n dependencies.extend([tf.verify_tensor_all_finite(x, msg='') for x in xs])\n\n with tf.control_dependencies(dependencies):\n # Calculate flattened vector grad_{xs} y.\n grads = tf.gradients(y, xs)\n grads = [tf.reshape(grad, [-1]) for grad in grads]\n grads = tf.concat(0, grads)\n # Loop over each element in the vector.\n mat = []\n d = grads.get_shape()[0]\n if not isinstance(d, int):\n d = grads.eval().shape[0]\n\n for j in range(d):\n # Calculate grad_{xs} ( [ grad_{xs} y ]_j ).\n gradjgrads = tf.gradients(grads[j], xs)\n # Flatten into vector.\n hi = []\n for l in range(len(xs)):\n hij = gradjgrads[l]\n # return 0 if gradient doesn't exist; TensorFlow returns None\n if hij is None:\n hij = tf.zeros(xs[l].get_shape(), dtype=tf.float32)\n\n hij = tf.reshape(hij, [-1])\n hi.append(hij)\n\n hi = tf.concat(0, hi)\n mat.append(hi)\n\n # Form matrix where each row is grad_{xs} ( [ grad_{xs} y ]_j ).\n return tf.pack(mat)",
"def hessian(X, theta, reg):\n n = len(X)\n d = len(X[0, :])\n h_vec = np.array([h(x, theta) for x in X])\n w = h_vec * (1 - h_vec)\n \n hess = np.zeros((d, d))\n for i in range(n):\n hess += np.outer(w[i] * X[i], X[i])\n hess += n * reg * np.eye(d)\n return hess",
"def hess(self, x, t, hdata):\n\n # Check arguments for consistency\n errstring = self.consist('mlp', x, t)\n if errstring != None:\n raise Exception(errstring)\n\n if computeData:\n # Data term in Hessian needs to be computed\n hdata = self.datahess(x, t)\n\n h, hdata = self.hbayes(hdata)\n\n # Sub-function to compute data part of Hessian",
"def hessian(self, var, bayesianOptimizer):\n bayesianOptimizer.raiseAnError(NotImplementedError,'Hessian is not yet developed for this acqusition function')",
"def test_hessian():\n u.seed_random(1)\n batch_size = 500\n\n data_width = 4\n targets_width = 4\n\n d1 = data_width ** 2\n d2 = 10\n d3 = targets_width ** 2\n o = d3\n N = batch_size\n d = [d1, d2, d3]\n\n dataset = u.TinyMNIST(data_width=data_width, targets_width=targets_width, dataset_size=batch_size)\n trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)\n train_iter = iter(trainloader)\n data, targets = next(train_iter)\n\n def loss_fn(data, targets):\n assert len(data) == len(targets)\n err = data - targets.view(-1, data.shape[1])\n return torch.sum(err * err) / 2 / len(data)\n\n u.seed_random(1)\n model: u.SimpleModel = u.SimpleFullyConnected(d, nonlin=False, bias=True)\n\n # backprop hessian and compare against autograd\n hessian_backprop = u.HessianExactSqrLoss()\n output = model(data)\n for bval in hessian_backprop(output):\n output.backward(bval, retain_graph=True)\n\n i, layer = next(enumerate(model.layers))\n A_t = layer.activations\n Bh_t = layer.backprops_list\n H, Hb = u.hessian_from_backprops(A_t, Bh_t, bias=True)\n\n model.disable_hooks()\n H_autograd = u.hessian(loss_fn(model(data), targets), layer.weight)\n u.check_close(H, H_autograd.reshape(d[i + 1] * d[i], d[i + 1] * d[i]),\n rtol=1e-4, atol=1e-7)\n Hb_autograd = u.hessian(loss_fn(model(data), targets), layer.bias)\n u.check_close(Hb, Hb_autograd, rtol=1e-4, atol=1e-7)\n\n # check first few per-example Hessians\n Hi, Hb_i = u.per_example_hess(A_t, Bh_t, bias=True)\n u.check_close(H, Hi.mean(dim=0))\n u.check_close(Hb, Hb_i.mean(dim=0), atol=2e-6, rtol=1e-5)\n\n for xi in range(5):\n loss = loss_fn(model(data[xi:xi + 1, ...]), targets[xi:xi + 1])\n H_autograd = u.hessian(loss, layer.weight)\n u.check_close(Hi[xi], H_autograd.reshape(d[i + 1] * d[i], d[i + 1] * d[i]))\n Hbias_autograd = u.hessian(loss, layer.bias)\n u.check_close(Hb_i[i], Hbias_autograd)\n\n # get subsampled Hessian\n u.seed_random(1)\n model = u.SimpleFullyConnected(d, nonlin=False)\n hessian_backprop = u.HessianSampledSqrLoss(num_samples=1)\n\n output = model(data)\n for bval in hessian_backprop(output):\n output.backward(bval, retain_graph=True)\n model.disable_hooks()\n i, layer = next(enumerate(model.layers))\n H_approx1 = u.hessian_from_backprops(layer.activations, layer.backprops_list)\n\n # get subsampled Hessian with more samples\n u.seed_random(1)\n model = u.SimpleFullyConnected(d, nonlin=False)\n\n hessian_backprop = u.HessianSampledSqrLoss(num_samples=o)\n output = model(data)\n for bval in hessian_backprop(output):\n output.backward(bval, retain_graph=True)\n model.disable_hooks()\n i, layer = next(enumerate(model.layers))\n H_approx2 = u.hessian_from_backprops(layer.activations, layer.backprops_list)\n\n assert abs(u.l2_norm(H) / u.l2_norm(H_approx1) - 1) < 0.08, abs(u.l2_norm(H) / u.l2_norm(H_approx1) - 1) # 0.0612\n assert abs(u.l2_norm(H) / u.l2_norm(H_approx2) - 1) < 0.03, abs(u.l2_norm(H) / u.l2_norm(H_approx2) - 1) # 0.0239\n assert u.kl_div_cov(H_approx1, H) < 0.3, u.kl_div_cov(H_approx1, H) # 0.222\n assert u.kl_div_cov(H_approx2, H) < 0.2, u.kl_div_cov(H_approx2, H) # 0.1233",
"def lr_loss_gradient_hessian(y, tx, w):\n loss, gradient = lr_compute_gradient(y, tx, w)\n # print(loss)\n\n return lr_compute_loss(y, tx, w), gradient, hessian(tx, w)",
"def InvHessian(self,x):\n return linalg.inv(self.besthessian(x))",
"def evaluateHessian(fgradient,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros((len(x),len(x)))\n for i in range(0,len(x)):\n # Define new gradient function which returns only the i:th element of \n # the gradient in a point x.\n def fgradienti(x):\n return fgradient(x)[i]\n # Evaluate new funciton object and store the result as a row in the \n # hessian.\n row = evaluateGradient(fgradienti,x)\n res[i,:] = row\n return res",
"def calculate_logistic_gradient_hessian(y, tx, w):\n loss, gradient = calculate_logistic_gradient(y, tx, w)\n return loss, gradient, calculate_hessian(tx, w)",
"def theil_U(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.sqrt(MSE(y, y_hat) / MSE(y, np.zeros_like(y)))",
"def test_turbomole_parse_hessian(h2o_nprhessian):\n hessian = parse_hessian(h2o_nprhessian)\n assert hessian.shape == (9, 9)\n eigvals, _ = np.linalg.eigh(hessian)\n assert eigvals[-1] == pytest.approx(1.12157030e00)",
"def hessian(poly: PolyLike) -> ndpoly:\n return gradient(gradient(poly))",
"def read_Hessian(self, workdir=None, param_names=None):\n if workdir is None:\n workdir = self.workdir\n if param_names is None:\n param_names = self.params\n toread = ['H' + name for name in param_names]\n try:\n mat = h5.File(workdir + \"/\" + self.file_gout, 'r')\n output = [np.transpose(mat[v]) for v in toread]\n except OSError:\n raise SeisCLError('Could not read Hessian')\n\n return output",
"def default_hessian(self, x, f):\r\n n = len(x)\r\n G = zeros((n,n))\r\n h = 1e-3\r\n \r\n for i in range(n):\r\n for j in range(n):\r\n\r\n G[i,j] = (f(x + h*self._basisvec(n,(i,j),(1,1))) - f(x + h*self._basisvec(n,(i,j), (1,-1)))\r\n - f(x + h*self._basisvec(n,(i,j),(-1,1))) + f(x + h*self._basisvec(n,(i,j),(-1,-1))))/(4*h**2)\r\n G = (G + G.T)/2\r\n return linalg.inv(G)",
"def get_hessian_eig(self):\n dynmat = self.kcmat\n eig_val = self.eig_val\n eig_vec = self.eig_vec\n gradmat = self.grad_mat\n hess_mat= self.hessian_mat\n hessian = np.zeros((3, 3, 3))\n idmat = np.identity(3)\n for n in xrange(3):\n hessian[n] += np.dot(np.dot(hess_mat, eig_vec[n]), eig_vec[n])\n pseudoinv = np.linalg.pinv(eig_val[n]*idmat - dynmat, rcond=1e-10)\n deriv_vec = np.dot(gradmat, eig_vec[n])\n hessian[n] += 2.0 * np.dot(np.dot(deriv_vec, pseudoinv), deriv_vec.T)\n #Take deriv of eigenvec into account: 2 * (d/dx s_i) * pinv_ij * (d_dy s_j)\n self.hessian_eig= hessian\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
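Several of the negatives above (e.g. `evaluateHessian`, `default_hessian`) approximate a Hessian with central finite differences. Below is a minimal standalone sketch of that pattern, assuming only a scalar function f: R^n -> R; the function name and the test function are illustrative and not taken from this dataset.

```python
import numpy as np

def finite_difference_hessian(f, x, h=1e-5):
    """Approximate the Hessian of a scalar function f at x with central differences."""
    n = len(x)
    H = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            e_i = np.zeros(n); e_i[i] = h
            e_j = np.zeros(n); e_j[j] = h
            H[i, j] = (f(x + e_i + e_j) - f(x + e_i - e_j)
                       - f(x - e_i + e_j) + f(x - e_i - e_j)) / (4 * h ** 2)
    # Symmetrize to damp numerical noise, as default_hessian above does.
    return (H + H.T) / 2

# f(x, y) = x**2 + 3*x*y has Hessian [[2, 3], [3, 0]].
H = finite_difference_hessian(lambda v: v[0] ** 2 + 3 * v[0] * v[1],
                              np.array([1.0, 2.0]))
```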
returns a full symmetric matrix filled with the distinct elements of v, filled rowwise | def vecsym(cls, v):
D,P = v.data.shape[:2]
Nv = v.data[0,0].size
tmp = numpy.sqrt(1 + 8*Nv)
if abs(int(tmp) - tmp) > 1e-16:
# hackish way to check that the input length of v makes sense
raise ValueError('size of v does not match any possible symmetric matrix')
N = (int(tmp) - 1)//2
A = cls(numpy.zeros((D,P,N,N)))
count = 0
for row in range(N):
for col in range(row,N):
A[row,col] = A[col,row] = v[count]
count +=1
return A | [
"def fill_diagonal(self, v: 'int const &') -> \"vnl_diag_matrixSI &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_fill_diagonal(self, v)",
"def vector_to_matrix(v):\r\n if not (np.sqrt(8 * v.shape[0] + 1) == int(np.sqrt(8 * v.shape[0] + 1))):\r\n print(\"ERROR: Can not make a square matrix.\")\r\n exit(1)\r\n\r\n n = v.shape[0]\r\n w = ((-1 + int(np.sqrt(8 * n + 1))) // 2) + 1\r\n m = np.zeros((w, w))\r\n\r\n index = 0\r\n for i in range(w):\r\n for j in range(w):\r\n if i > j - 1:\r\n continue\r\n\r\n m[i, j] = v[index]\r\n m[j, i] = m[i, j]\r\n\r\n index += 1\r\n return m",
"def fill_diagonal(self, v: 'double const &') -> \"vnl_diag_matrixD &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixD_fill_diagonal(self, v)",
"def fill_diagonal(self, v: 'stdcomplexF') -> \"vnl_diag_matrixCF &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixCF_fill_diagonal(self, v)",
"def fill_diagonal(self, v: 'float const &') -> \"vnl_diag_matrixF &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixF_fill_diagonal(self, v)",
"def fill_diagonal(self, v: 'long double const &') -> \"vnl_diag_matrixLD &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixLD_fill_diagonal(self, v)",
"def one_vs_all(vector):\n elements = np.unique(vector)\n result = np.zeros((len(vector), len(elements)))\n # create column for each unique element in vector\n # fill the column with either 0 or 1\n for c in range(len(elements)): result[:, c] = (vector == elements[c])\n return result",
"def cross_prod_matrix(v):\n\n v_x = np.array([ [0., -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]\n ])\n\n return v_x",
"def assemble_kernel_matrix(x,v):\n #use R(fabs(dot(i,j))) in case angles are considered in the range (0,pi/2)\n return array([R(dot(i,j)) for i in x for j in v]).reshape(len(x),len(v))",
"def householder(v):\n return np.eye(3) - (2 * np.dot(v, v.T))",
"def unflatten_symmetric(m):\n \n vector_size = m.shape[0]\n matrix_size = int((math.sqrt(1 + 8*vector_size) - 1) / 2)\n M = np.empty((matrix_size, matrix_size), dtype=m.dtype)\n \n k = 0\n for i in range(matrix_size):\n for j in range(i, matrix_size):\n M[i,j] = m[k]\n M[j,i] = m[k]\n k += 1\n \n return M",
"def vector_to_matrix(self, vec):\n vec = vectorization.expand_dims(vec, to_ndim=2)\n # TODO(nina): do we need factor np.sqrt(2) and why?\n _, vec_dim = vec.shape\n mat_dim = int((np.sqrt(8 * vec_dim + 1) - 1) / 2)\n mat = np.zeros((mat_dim,) * 2)\n\n lower_triangle_indices = np.tril_indices(mat_dim)\n diag_indices = np.diag_indices(mat_dim)\n\n mat[lower_triangle_indices] = 2 * vec / np.sqrt(2)\n mat[diag_indices] = vec\n\n mat = make_symmetric(mat)\n return mat",
"def flatten_symmetric(M):\n \n matrix_size = M.shape[0]\n vector_size = matrix_size * (1 + matrix_size) / 2\n m = np.empty((vector_size,), dtype=M.dtype)\n \n k = 0\n for i in range(matrix_size):\n for j in range(i, matrix_size):\n m[k] = M[i,j]\n k += 1\n \n return m",
"def set(self, v: 'vnl_vectorSI') -> \"vnl_diag_matrixSI &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_set(self, v)",
"def out_vertices(self, v):\n\n ns = []\n for w in self[v]:\n if w not in ns:\n ns.append(w)\n return ns",
"def set(self, v: 'vnl_vectorF') -> \"vnl_diag_matrixF &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixF_set(self, v)",
"def set(self, v: 'vnl_vectorD') -> \"vnl_diag_matrixD &\":\n return _vnl_diag_matrixPython.vnl_diag_matrixD_set(self, v)",
"def PartialPivot(A,v):\r\n\r\n N = len(v)\r\n \r\n # Gaussian elimination\r\n for m in range(N):\r\n heads = A[::,m] #collecting leading elements of the m-th stel in the elimination to ultimately select a good candidate. \r\n abs_heads = list(abs(heads))\r\n winning = abs_heads.index(max(abs_heads))\r\n if heads[m] == 0:\r\n A[m, :], A[winning, :] = copy(A[winning, :]), copy(A[m, :])\r\n v[m], v[winning] = copy(v[winning]), copy(v[m])\r\n else:\r\n pass\r\n # Divide by the diagonal element\r\n div = A[m,m]\r\n A[m,:] /= div\r\n v[m] /= div\r\n \r\n # Now subtract from the lower rows\r\n for i in range(m+1,N):\r\n mult = A[i,m]\r\n A[i,:] -= mult*A[m,:]\r\n v[i] -= mult*v[m]\r\n \r\n # Backsubstitution\r\n x = empty(N,float)\r\n for m in range(N-1,-1,-1):\r\n x[m] = v[m]\r\n for i in range(m+1,N):\r\n x[m] -= A[m,i]*x[i]\r\n return x",
"def _get_unvisted_matrix(self):\n visted = [[False] * self.get_grid_width() \\\n for dummy_idx in range(self.get_grid_height())]\n return visted",
"def _svi_lower_triang_vec_to_mat(vec):\n m = len(vec)\n k = (-1 + np.sqrt(1 + 8 * m)) / 2\n if k != int(k):\n raise ValueError(\"Vec has an invalid size\")\n indices = np.tril_indices(k)\n mat = np.zeros((int(k), int(k)))\n mat[indices] = vec.reshape(-1)\n return mat"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
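The `vecsym` document above recovers the matrix size N from the vector length via N = (sqrt(1 + 8*Nv) - 1)/2 and then fills the upper triangle rowwise, mirroring each entry below the diagonal. Here is a plain-NumPy sketch of the same unpacking without the class wrapper; the name `vec_to_symmetric` is illustrative only.

```python
import numpy as np

def vec_to_symmetric(v):
    """Unpack a vector of upper-triangular entries (stored rowwise) into a full symmetric matrix."""
    tmp = np.sqrt(1 + 8 * len(v))
    if abs(int(tmp) - tmp) > 1e-16:
        raise ValueError('size of v does not match any possible symmetric matrix')
    n = (int(tmp) - 1) // 2
    A = np.zeros((n, n))
    count = 0
    for row in range(n):
        for col in range(row, n):
            A[row, col] = A[col, row] = v[count]
            count += 1
    return A

# A length-6 vector maps to a 3x3 symmetric matrix.
A = vec_to_symmetric(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]))
```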
Import public keys of a friend (receiver) into the PKFernet object's public keyring. The receiver_public_keyring dictionary object can contain public keys for multiple receivers, but this method will only import the keys associated with the receiver_name parameter. | def import_pub_keys(self, receiver_name, receiver_public_keyring):
self.public_keys[receiver_name] = receiver_public_keyring[receiver_name] | [
"def import_key_pair(DryRun=None, KeyName=None, PublicKeyMaterial=None):\n pass",
"def sync(self, ask_anyway=False):\n info = {}\n added = []\n crypto = self.crypto\n\n first = True\n while first or (ask_anyway or not crypto.can_encrypt):\n first = False\n if \"urls\" not in info:\n info[\"urls\"] = []\n if \"pems\" not in info:\n info[\"pems\"] = []\n if \"locations\" not in info:\n info[\"locations\"] = {}\n\n urls, pems, locations, new_ones = self.get_public_keys(ask_anyway=ask_anyway, known_private_key_fingerprints=self.crypto.private_key_fingerprints)\n if not new_ones and ask_anyway:\n break\n\n info[\"urls\"].extend(urls)\n info[\"pems\"].extend(pems)\n info[\"locations\"].update(locations)\n\n downloaded = []\n for url in info.get(\"urls\", []):\n downloaded.extend(self.download_pems(url))\n info[\"pems\"].extend(downloaded)\n\n for pem in info[\"pems\"]:\n location = info[\"locations\"].get(pem)\n fingerprint = crypto.add_public_keys([(pem, location)]).get(pem)\n added.append(fingerprint)\n if not fingerprint:\n log.error(\"Failed to add public key\\tpem=%s\", pem)\n else:\n if location:\n log.debug(\"Adding a public key\\tlocation=%s\\tfingerprint=%s\", location, fingerprint)\n else:\n log.debug(\"Adding a public key\\tfingerprint=%s\", fingerprint)\n\n if not crypto.can_encrypt:\n log.error(\"Was unable to find any public keys\")\n del info[\"urls\"]\n del info[\"pems\"]\n else:\n break\n\n for fingerprint in list(crypto.public_key_fingerprints):\n if fingerprint not in added:\n log.info(\"Removing public key we aren't encrypting with anymore\\tfingerprint=%s\", fingerprint)\n crypto.remove_public_key(fingerprint)",
"def send_public_key(self):\n self.send(str(self.PUBLIC_KEY[0]) + \",\" + str(self.PUBLIC_KEY[1]))",
"def deploy_public_key(self):\n cprint(str(self) + \" deploying public key to the contract\", \"yellow\")\n txn_dict = {'from': self.address}\n pk_to_export = self.public_key.export_key()\n self.contract.functions.addCandidateEncryptKey(pk_to_export).transact(txn_dict)",
"def send_pubkeys(self):\n for agent_name, agent in self.directory.clients.items():\n pubkey = self.pubkeyList[agent_name] # retrieve pubkey for client we're sending to\n body = {'pubkey': pubkey}\n msg = Message(sender_name=self.name, recipient_name=agent_name, body=body)\n agent.receive_pubkey(msg) # invoke method of receiving agent",
"def put_key(\n self,\n slot: SLOT,\n private_key: Union[\n rsa.RSAPrivateKeyWithSerialization,\n ec.EllipticCurvePrivateKeyWithSerialization,\n ],\n pin_policy: PIN_POLICY = PIN_POLICY.DEFAULT,\n touch_policy: TOUCH_POLICY = TOUCH_POLICY.DEFAULT,\n ) -> None:\n slot = SLOT(slot)\n key_type = KEY_TYPE.from_public_key(private_key.public_key())\n check_key_support(self.version, key_type, pin_policy, touch_policy, False)\n ln = key_type.bit_len // 8\n numbers = private_key.private_numbers()\n if key_type.algorithm == ALGORITHM.RSA:\n numbers = cast(rsa.RSAPrivateNumbers, numbers)\n if numbers.public_numbers.e != 65537:\n raise NotSupportedError(\"RSA exponent must be 65537\")\n ln //= 2\n data = (\n Tlv(0x01, int2bytes(numbers.p, ln))\n + Tlv(0x02, int2bytes(numbers.q, ln))\n + Tlv(0x03, int2bytes(numbers.dmp1, ln))\n + Tlv(0x04, int2bytes(numbers.dmq1, ln))\n + Tlv(0x05, int2bytes(numbers.iqmp, ln))\n )\n else:\n numbers = cast(ec.EllipticCurvePrivateNumbers, numbers)\n data = Tlv(0x06, int2bytes(numbers.private_value, ln))\n if pin_policy:\n data += Tlv(TAG_PIN_POLICY, int2bytes(pin_policy))\n if touch_policy:\n data += Tlv(TAG_TOUCH_POLICY, int2bytes(touch_policy))\n\n logger.debug(\n f\"Importing key with pin_policy={pin_policy}, touch_policy={touch_policy}\"\n )\n self.protocol.send_apdu(0, INS_IMPORT_KEY, key_type, slot, data)\n logger.info(f\"Private key imported in slot {slot} of type {key_type}\")\n return key_type",
"def check_publickey(self, data):\n\n return RSA.importKey(self.get_key(data))",
"def load_keys(self):\n try:\n with open(f'wallet-{self.node_id}.txt', mode='r') as f:\n keys = f.readlines()\n public_key = keys[0][:-1]\n private_key = keys[1]\n self.public_key = public_key\n self.private_key = private_key\n return True\n except (IOError, IndexError):\n print('Loading wallet failed...')\n return False",
"def load_public_key( config, key_type, object_id ):\n key_path = conf.object_key_path( config, key_type, object_id, public=True )\n return read_public_key( key_path )",
"def pre_import_ssh_public_key(\n self,\n request: oslogin.ImportSshPublicKeyRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[oslogin.ImportSshPublicKeyRequest, Sequence[Tuple[str, str]]]:\n return request, metadata",
"def generate_key_pair(self):\n assert self.public_key is None, 'This user already has a public key'\n assert self.private_key is None, 'This user already has a private key'\n key_pair = RSA.generate(NUM_KEY_BITS)\n self.private_key = key_pair.export_key().decode()\n self.public_key = key_pair.publickey().export_key().decode()",
"def publickey(self):\n k = PublicKey.PublicKey(self.key)\n k.e = self.e\n k.n = self.n\n return k",
"def get_public_key(self):\n return self.rsa.publickey()",
"def testPublicKeyPatternMatchesValidPublicKeys(self):\n pattern = regex.REGEXES_AND_ERRORS_DICTIONARY['publicKeyPattern']\n potentialPublicKeys = []\n for email in POTENTIAL_EMAIL_ADDRESSES:\n ssh_key = RSA.generate(2048).publickey().exportKey('OpenSSH')\n potentialPublicKeys.append(ssh_key)\n self._patternMatchHelper(pattern, potentialPublicKeys)",
"def load_public_keys(path):\n \n key_files = glob.glob(f\"{path}/*.pem\")\n\n return key_files",
"def add_participant(self, public_key):\n self._participants[_get_id_from_key(public_key)] = public_key",
"def parse_host_key(self):\n self.host_key = paramiko.RSAKey(filename=self.host_key)",
"def get_public_key(private_key):\n return private_key.public_key()",
"def import_keys(self, path):\n\n os.environ['GNUPGHOME'] = path\n\n ctx = gpgme.Context()\n\n keys = []\n for key in list(ctx.keylist()):\n for subkey in key.subkeys:\n content = BytesIO()\n ctx.export(str(subkey.keyid), content)\n keys.append(content)\n\n os.environ['GNUPGHOME'] = self.keyring_path\n ctx = gpgme.Context()\n\n for key in keys:\n key.seek(0)\n ctx.import_(key)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
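A hedged usage sketch of the `import_pub_keys` behaviour described above. The real PKFernet constructor and keyring format are not shown in this entry, so a minimal stand-in class and made-up key strings are used purely for illustration.

```python
# Minimal stand-in mirroring only the keyring behaviour of import_pub_keys.
class PublicKeyStore:
    def __init__(self):
        self.public_keys = {}

    def import_pub_keys(self, receiver_name, receiver_public_keyring):
        # Only the keys belonging to the named receiver are copied in.
        self.public_keys[receiver_name] = receiver_public_keyring[receiver_name]

# Hypothetical keyring holding keys for two receivers.
keyring = {
    'alice': {'enc_pub': '-----BEGIN PUBLIC KEY----- ...alice... -----END PUBLIC KEY-----'},
    'bob': {'enc_pub': '-----BEGIN PUBLIC KEY----- ...bob... -----END PUBLIC KEY-----'},
}

store = PublicKeyStore()
store.import_pub_keys('alice', keyring)  # imports alice's keys only
assert 'bob' not in store.public_keys
```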
Transform the coordinates to be agent-centric. Input state tensor is of size (batch_size, state_length); the state fields and their indices are 'px' (0), 'py' (1), 'vx' (2), 'vy' (3), 'gx' (4), 'gy' (5), 'px1' (6), 'py1' (7), 'vx1' (8), 'vy1' (9), 'radius1' (10). | def rotate(state):
size = state.shape # (100, 20, 11)
last_dim = len(size) - 1 # 3
dx = state[..., 4] - state[..., 0] # (100, 20)
dy = state[..., 5] - state[..., 1]
rot = torch.atan2(dy, dx) # (100, 20)
dg = torch.norm(torch.cat([dx.unsqueeze(dim = last_dim), dy.unsqueeze(dim = last_dim)], dim=last_dim), 2, dim=last_dim, keepdim=True) # (100, 20, 1)
vx = (state[..., 2] * torch.cos(rot) + state[..., 3] * torch.sin(rot)).unsqueeze(dim = last_dim) # (100, 20, 1)
vy = (state[..., 3] * torch.cos(rot) - state[..., 2] * torch.sin(rot)).unsqueeze(dim = last_dim)
vx1 = (state[..., 8] * torch.cos(rot) + state[..., 9] * torch.sin(rot)).unsqueeze(dim = last_dim) # (100, 20, 1)
vy1 = (state[..., 9] * torch.cos(rot) - state[..., 8] * torch.sin(rot)).unsqueeze(dim = last_dim)
px1 = (state[..., 6] - state[..., 0]) * torch.cos(rot) + (state[..., 7] - state[..., 1]) * torch.sin(rot) # (100, 20)
px1 = px1.unsqueeze(dim = last_dim) # (100, 20, 1)
py1 = (state[..., 7] - state[..., 1]) * torch.cos(rot) - (state[..., 6] - state[..., 0]) * torch.sin(rot)
py1 = py1.unsqueeze(dim = last_dim)
# radius1 = state[..., 10].unsqueeze(dim = last_dim)
da = torch.norm(torch.cat([(state[..., 0] - state[..., 6]).unsqueeze(dim = last_dim), (state[..., 1] - state[..., 7]).
unsqueeze(dim = last_dim)], dim=last_dim), 2, dim=last_dim, keepdim=True) # (100, 20, 1)
new_state = torch.cat([dg, vx, vy, px1, py1, vx1, vy1, da], dim=last_dim)
# new_state = torch.cat([dg, vx, vy, px1, py1, vx1, vy1, radius1, da], dim=last_dim)
return new_state, rot[...,0] # new_state: (100, 20, 8); (100, 20, 9) if radius1 is included
"def discretize_state(self, state):\n x, x_dot, phi, phi_dot = state\n if x > 1.:\n x = 1\n elif x < -1.:\n x = -1\n else: \n x = 0\n\n if x_dot < -0.1:\n x_dot = -2\n elif x_dot > 0.1:\n x_dot = 2\n elif x_dot < -0.03:\n x_dot = -1\n elif x_dot > 0.03:\n x_dot = 1\n else:\n x_dot = 0\n\n if phi > 0.1:\n phi = 1\n elif phi < -0.1:\n phi = -1\n else: \n phi = 0\n\n if phi_dot < -0.1:\n phi_dot = -2\n elif phi_dot > 0.1:\n phi_dot = 2\n elif phi_dot < -0.03:\n phi_dot = -1\n elif phi_dot > 0.03:\n phi_dot = 1\n else:\n phi_dot = 0\n \n return (x, x_dot, phi, phi_dot)",
"def state_model_input(cls, state: State) -> np.ndarray:\n st = state.state_as_array()\n st = st.reshape([1, 9])\n return st",
"def get_joint_state(self):\n\n joint_state = np.zeros((p.num_rovers, p.num_inputs))\n\n for rover_id in range(self.num_agents):\n self_x = self.rover_pos[rover_id, 0]; self_y = self.rover_pos[rover_id, 1]\n self_orient = self.rover_pos[rover_id, 2]\n\n rover_state = [0.0 for _ in range(int(360 / p.angle_resolution))]\n poi_state = [0.0 for _ in range(int(360 / p.angle_resolution))]\n temp_poi_dist_list = [[] for _ in range(int(360 / p.angle_resolution))]\n temp_rover_dist_list = [[] for _ in range(int(360 / p.angle_resolution))]\n\n # Log POI distances into brackets\n for poi_id in range(p.num_pois):\n poi_x = self.poi_pos[poi_id, 0]\n poi_y = self.poi_pos[poi_id, 1]\n poi_value = self.poi_values[poi_id]\n\n angle, dist = self.get_angle_dist(self_x, self_y, poi_x, poi_y)\n\n if dist >= self.obs_radius:\n continue # Observability radius\n\n angle -= self_orient\n if angle < 0:\n angle += 360\n\n bracket = int(angle / p.angle_resolution)\n if bracket >= len(temp_poi_dist_list):\n print(\"ERROR: BRACKET EXCEED LIST\", bracket, len(temp_poi_dist_list))\n bracket = len(temp_poi_dist_list) - 1\n if dist < p.min_distance: # Clip distance to not overwhelm tanh in NN\n dist = p.min_distance\n\n temp_poi_dist_list[bracket].append(poi_value/dist)\n\n # Log rover distances into brackets\n for other_rover_id in range(p.num_rovers):\n if other_rover_id == rover_id: # Ignore self\n continue\n rov_x = self.rover_pos[other_rover_id, 0]\n rov_y = self.rover_pos[other_rover_id, 1]\n angle, dist = self.get_angle_dist(self_x, self_y, rov_x, rov_y)\n\n if dist >= self.obs_radius:\n continue # Observability radius\n\n angle -= self_orient\n if angle < 0:\n angle += 360\n\n if dist < p.min_distance: # Clip distance to not overwhelm sigmoid in NN\n dist = p.min_distance\n\n bracket = int(angle / p.angle_resolution)\n if bracket >= len(temp_rover_dist_list):\n print(\"ERROR: BRACKET EXCEED LIST\", bracket, len(temp_rover_dist_list))\n bracket = len(temp_rover_dist_list) - 1\n temp_rover_dist_list[bracket].append(1/dist)\n\n # Encode the information into the state vector\n for bracket in range(int(360 / p.angle_resolution)):\n # POIs\n num_poi = len(temp_poi_dist_list[bracket]) # Number of POIs in bracket\n if num_poi > 0:\n if p.sensor_model == 'density':\n poi_state[bracket] = sum(temp_poi_dist_list[bracket]) / num_poi # Density Sensor\n elif p.sensor_model == 'summed':\n poi_state[bracket] = sum(temp_poi_dist_list[bracket]) # Summed Distance Sensor\n elif p.sensor_model == 'closest':\n poi_state[bracket] = max(temp_poi_dist_list[bracket]) # Closest Sensor\n else:\n sys.exit('Incorrect sensor model')\n else:\n poi_state[bracket] = -1.0\n joint_state[rover_id, bracket] = poi_state[bracket]\n\n # Rovers\n num_agents = len(temp_rover_dist_list[bracket]) # Number of rovers in bracket\n if num_agents > 0:\n if p.sensor_model == 'density':\n rover_state[bracket] = sum(temp_rover_dist_list[bracket]) / num_agents # Density Sensor\n elif p.sensor_model == 'summed':\n rover_state[bracket] = sum(temp_rover_dist_list[bracket]) # Summed Distance Sensor\n elif p.sensor_model == 'closest':\n rover_state[bracket] = max(temp_rover_dist_list[bracket]) # Closest Sensor\n else:\n sys.exit('Incorrect sensor model')\n else:\n rover_state[bracket] = -1.0\n joint_state[rover_id, (bracket + 4)] = rover_state[bracket]\n\n return joint_state",
"def build_state(self):\n\n # Collect data about the environment\n waypoint = self.planner.next_waypoint() # The next waypoint \n inputs = self.env.sense(self) # Visual input - intersection light and traffic\n for key, value in iter(inputs.items()):\n if value is None:\n inputs.update({key:'None'})\n deadline = self.env.get_deadline(self) # Remaining deadline\n\n ########### \n ## TO DO ##\n ###########\n \n # NOTE : you are not allowed to engineer features outside of the inputs available.\n # Because the aim of this project is to teach Reinforcement Learning, we have placed \n # constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.\n # With the hand-engineered features, this learning process gets entirely negated.\n \n # Set 'state' as a tuple of relevant data for the agent \n return self.build_index(inputs,waypoint)",
"def to_States_grid(self):\n\t\tfor j in range(0, self.height):\t\t# we iterate on every line\n\t\t\tfor i in range(0, self.width):\t# we iterate on every column\n\t\t\t\tself.States_grid.append(State(i*self.reso + self.origin[0], j*self.reso + self.origin[1], i, j, self.map.data[j*self.width + i]))",
"def _celestial(self):\n cos = np.cos(self.lat)\n sin = np.sin(self.lat)\n transfo = np.matrix([ \n [0, -sin, cos],\n [1, 0, 0],\n [0, cos, sin]\n ])\n return transfo",
"def get_cube_state(coords, direction, length, initial_cube):\n size = len(initial_cube)\n segment_cube = [[[0 for z in range(size)] for y in range(size)] for x in range(size)]\n operator, axes = direction\n for i in range(length):\n x = operator(coords.x, i) if axes.x else coords.x\n y = operator(coords.y, i) if axes.y else coords.y\n z = operator(coords.z, i) if axes.z else coords.z\n segment_cube[x][y][z] = 1\n\n return tuple(\n tuple(\n tuple(\n initial_cube[x][y][z] + segment_cube[x][y][z]\n for z in range(size)\n ) for y in range(size)\n ) for x in range(size)\n )",
"def __call__(self, input_: nd.NDArray):\n if not self._state:\n self._state = self._gru_cell.begin_state(input_.shape[0])\n output, new_state = self._gru_cell(input_, self._state)\n self._state = new_state\n\n return output",
"def transform_to_physical_coordinates(index):\n return np.matmul(\n np.matmul(np.array(properties.direction).reshape(3, 3), np.diag(properties.spacing)),\n index) + properties.origin",
"def ref_cart_case_circ():\n pos = np.array([7000, 0, 0]) #km \n vel = np.array([0, 7.456, 0]) #km \n mu = 398600\n time = 0\n return Cartesian_State(pos, vel, mu, time)",
"def move(agent, firefly, network, hx):\n #network_output = network(encode(firefly - agent), hx)\n network_output = network(encode(firefly - agent))\n x_step = network_output[0,0]\n y_step = network_output[0,1]\n x = agent[0,0]\n y = agent[0,1]\n new_x = x + x_step\n new_y = y + y_step\n new_agent = torch.stack([new_x, new_y], dim=1)\n return new_agent",
"def preprocess_inputs(self, state, goal):\n #state, goal = self.clip_states_goals(state, goal)\n state_norm = self.state_normalizer.normalize(state)\n goal_norm = self.goal_normalizer.normalize(goal)\n inputs = np.concatenate([state_norm, goal_norm])\n return torch.tensor(inputs, dtype=torch.float32).unsqueeze(0)",
"def actuator_coords(self):\n\n mask = np.ones((11, 11), np.bool)\n for i in range(0, 3):\n for j in range(3 - i):\n mask[i, j] = False\n mask = np.bitwise_and(mask, mask[::-1])\n mask = np.bitwise_and(mask, mask[:, ::-1])\n rs = np.stack(np.where(mask)).T - 5\n return rs",
"def cube2latlon_preprocess(x, y, xi, yi):",
"def get_normalized_state(self) -> np.array:\n return np.array(self.env.state)",
"def state_shape(self):\n pass",
"def forward(self, t, state):\n xs = state[:, :-1]\n dlogp = state[:, -1:]\n state = (xs, dlogp)\n *dxs, div = self._dynamics(t, state)\n state = torch.cat([*dxs, div], dim=-1)\n return state",
"def geometryTransformMatrix(*args, **kwargs):\n \n pass",
"def _network_2_robot_action(self, state):\n with torch.no_grad():\n if self.is_spike:\n state = self._state_2_state_spikes(state)\n if self.is_record:\n self.record_data.append(state)\n state = torch.Tensor(state).to(self.device)\n action = self.actor_net(state, 1).to('cpu')\n elif self.is_scale:\n state = self._state_2_scale_state(state)\n if self.is_record:\n self.record_data.append(state)\n state = torch.Tensor(state).to(self.device)\n action = self.actor_net(state).to('cpu')\n else:\n state = np.array(state).reshape((1, -1))\n if self.is_record:\n self.record_data.append(state)\n state = torch.Tensor(state).to(self.device)\n action = self.actor_net(state).to('cpu')\n action = action.numpy().squeeze()\n noise = np.random.randn(2) * self.action_rand\n action = noise + (1 - self.action_rand) * action\n action = np.clip(action, [0., 0.], [1., 1.])\n action = wheeled_network_2_robot_action_decoder(\n action, self.max_spd, self.min_spd\n )\n return action"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
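The `rotate` document above re-expresses every quantity in a frame whose x-axis points from the agent's position (px, py) toward its goal (gx, gy), with rot = atan2(gy - py, gx - px). A small PyTorch sketch of that transform for a toy batch, assuming the field layout listed in the query; the values are arbitrary.

```python
import torch

# Toy batch of two states laid out as in the query:
# [px, py, vx, vy, gx, gy, px1, py1, vx1, vy1, radius1]
state = torch.tensor([
    [0., 0., 1., 0., 1., 1., 0.5, 0.5, 0., 1., 0.3],
    [1., 1., 0., 1., 2., 3., 1.5, 1.5, 1., 0., 0.3],
])

# Angle of the agent-to-goal direction in the world frame, as in rotate().
rot = torch.atan2(state[..., 5] - state[..., 1], state[..., 4] - state[..., 0])

# The agent's own velocity expressed in the goal-aligned (agent-centric) frame.
vx = state[..., 2] * torch.cos(rot) + state[..., 3] * torch.sin(rot)
vy = state[..., 3] * torch.cos(rot) - state[..., 2] * torch.sin(rot)

# Distance to the goal, matching the dg term in rotate().
dg = torch.norm(state[..., 4:6] - state[..., 0:2], dim=-1)
```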
Return a dataframe containing the path to each different picture. Two pictures are considered the same if they have the same file name (image id). | def get_list_image_file(path_to_root_dir):
dictionary = {}
for filename in glob.iglob(path_to_root_dir + '/**/*.jpg', recursive=True):
file_without_jpg = os.path.basename(filename).replace(".jpg", '')
dictionary[int(file_without_jpg)] = os.path.abspath(filename)
df_filepath = pd.DataFrame(list(dictionary.items()),
columns=['_image_id','_image_path'])
return df_filepath | [
"def get_image_df(image_path, df):\n image = process_image(image_path)\n return image, df",
"def build_hash_to_image_dataframe(self):\n\n # df_hashes's columns: file, hash, hash_list\n # file -> image's file path\n # hash -> hash code associated to image\n # hash_list -> list of all hash code's elements\n df_hashes = pd.DataFrame()\n already_exist_counter = 0\n # hash code -> image's file path\n dict_hash_to_images = {}\n\n # For each image calculate the phash and store it in a DataFrame\n for image in tqdm(self.img_file_list):\n\n hash_code = self.img_hash(image, self.hash_size, self.hash_algo)\n\n result = {'file': image, 'short_file': image.split(os.sep)[-1], 'hash': hash_code,\n 'hash_list': list(str(hash_code))}\n df_hashes = df_hashes.append(result, ignore_index=True)\n\n if hash_code in dict_hash_to_images:\n if self.verbose == 2:\n print(image, ' already exists as', ' '.join(dict_hash_to_images[hash_code]))\n already_exist_counter += 1\n\n dict_hash_to_images[hash_code] = dict_hash_to_images.get(hash_code, []) + [image]\n\n # Are there any duplicates in terms of hashes of size 'hash_size'?\n print(\"{0} out to {1}\".format(already_exist_counter, len(self.img_file_list)))\n # TODO warning\n # assert already_exist_counter == 0, \"it actually can only represent 16^\" + str(self.hash_size) + \\\n # \" values let's try with a bigger hash.\"\n\n return df_hashes",
"def create_fullpath(df):\n import os\n df['Path'] = df.apply(lambda x: os.path.join('/media/DATOS/ORIG/padchest', x['ImageDir'], x['ImageID']), axis=1)\n return df",
"def remove_exact_duplicates():\n records = []\n\n path = '../data/flowers/'\n dirs = glob.glob(path + \"*/\")\n for cl in dirs:\n cl = cl[len(path):-1]\n for img in os.listdir(path + cl):\n \n with open(path + cl + \"/\" + img, 'rb') as fd:\n md5sum = hashlib.md5(fd.read()).hexdigest()\n\n records.append({\n 'filename': img,\n 'class': cl,\n 'md5sum': md5sum,\n })\n\n df = pd.DataFrame.from_records(records)\n\n\n counts = df.groupby('md5sum')['class'].count()\n duplicates = counts[counts > 1]\n print(\"Number of exact duplicates: \", len(duplicates))\n\n # print(duplicates)\n for md5sum in duplicates.index:\n subset = df[df['md5sum'] == md5sum]\n print(subset)\n if len(subset['filename'].value_counts()) > 1:\n \n img1_name = path + subset.iloc[0, 1] + \"/\" + subset.iloc[0, 0]\n img2_name = path + subset.iloc[1, 1] + \"/\" + subset.iloc[1, 0]\n\n # visualize duplicates\n img1 = cv2.cvtColor(cv2.imread(img1_name), cv2.COLOR_BGR2RGB)\n img2 = cv2.cvtColor(cv2.imread(img2_name), cv2.COLOR_BGR2RGB)\n \n fig = plt.figure()\n fig.add_subplot(121)\n plt.title(subset.iloc[0, 0])\n plt.imshow(img1)\n\n fig.add_subplot(122)\n plt.title(subset.iloc[1, 0])\n plt.imshow(img2)\n\n plt.show()\n \n if os.path.exists(img1_name):\n os.remove(img1_name)\n if os.path.exists(img2_name):\n os.remove(img2_name)\n\n print('------')",
"def create_symlink_for_images(images, logging):\n origins = []\n image_paths = []\n availables = []\n\n # Resetar o index do Dataframe.\n images = images.reset_index()\n\n for i, image in images.iterrows():\n origin = os.path.join(os.getenv(\"IMAGES_PATH\"), image['filename'])\n\n if os.path.exists(origin):\n\n filename = os.path.basename(origin)\n filename = \"%s.fits\" % str(image['id'])\n dest = os.path.join(os.getenv(\"DATA_DIR\"), filename)\n os.symlink(origin, dest)\n\n # Registra se a imagem esta disponivel ou nao.\n availables.append(True)\n origins.append(origin)\n image_paths.append(dest)\n\n logging.debug(\n \"Link Image. Origin [%s] to [%s]\" % (origin, dest))\n else:\n availables.append(False)\n origins.append(None)\n image_paths.append(None)\n\n logging.warning(\"Image not found. [%s]\" % origin)\n\n images['available'] = availables\n images['original_path'] = origins\n images['current_path'] = image_paths\n\n # Volta a utilizaro index pelo ID.\n images = images.set_index('id')\n return images",
"def image_path_at(self, i):\n imgName = self.imgNames[self.image_id_at(i)]\n imgPath = osp.join(self.splitPath, \"image\", imgName)\n\n # print(\"Path:\\n\")\n # print(imgPath)\n\n return imgPath",
"def to_csv(self, filename):\n\n\t\t\n\t\t# Convert each image to a pandas data frame and append them\n\t\tdf = pd.DataFrame()\n\t\tfor i in range(len(self.image)):\n\t\t\t# Print updates\n\t\t\tprint self.image[i].file_name\n\t\t\tentry = self.image[i].to_pandas()\n\n\t\t\t# Append to growing data frame\n\t\t\tdf = df.append(entry)\n\n\t\t# return df to testing\n\t\tdf.to_csv(filename, index_label='id')\n\n\t\treturn df",
"def to_pandas(self):\n\t\t# reshape_image = np.reshape(self.image.copy(), np.prod(self.get_shape()), 1)\n\n\t\t# Initialize data frame\n\t\tdf = pd.DataFrame()\n\n\t\t# Get file number, used in index below\n\t\tfile_number = self.file_name.split('/')[1].split('.png')[0]\n\t\trow_labels = [str(y) for y in range(1, self.get_shape()[0]+1)]\n\n\t\t\n\t\t# labels = ['{0}_{1}_'.format(file_number, i) for i in row_labels]\n\n\t\t# for col in range(self.get_shape()[1]):\n\t\t\t# row_labels = [str(y) for y in 1:self.get_shape()[0]]\n\t\t#\tlabels = ['{0}_{1}_{2}'.format(file_number, i, col+1) for i in row_labels]\n\n\t\t\t# Make a data frame\n\t\t#\tentry = pd.DataFrame(data={'value': self.image[:,col]},\n\t\t#\t\t\t\t\t\t index=labels)\n\n\t\t\t# Append to growing data frame\n\t\t#\tdf = df.append(entry)\n\t\t\t\n\t\t\t# Grab the column we need\n\t\t# for column in range(self.get_shape()[1]):\n\t\t# \t# for row in range(self.get_shape()[0]):\n\t\t# \t\tprint row, '_', column\n\t\t# \t\tentry = pd.DataFrame(data={'val': self.image[row][column]},\n\t\t# \t\t\t\t\t\t\t index=[file_number + '_' + str(row) + '_' + str(column)])\n\t\t# \t\tdf = df.append(entry)\n\n\t\tif np.prod(df.shape) != np.prod(self.get_shape()):\n\t\t\tprint 'Error in: ' + self.file_name\n\t\t\tprint self.get_shape(), df.shape\n\t\treturn(df)",
"def image_path_at(self, i):\n imgName = self.imgNames[self.image_id_at(i)]\n imgPath = osp.join(self.splitPath, \"image\", imgName)\n\n return imgPath",
"def get_image_filepath(data_dir, row):\n return os.path.join(data_dir, f\"{row.Species}___{row.Label}\", row.Filename)",
"def print_out_paired_images(df, dataset, pair_idxs, title_fxn, directory_to_save):\n for i in range(len(pair_idxs)):\n img_1 = dataset[pair_idxs[i][0]]['image'][0, :, :]\n img_1 = (img_1 - img_1.mean()) / img_1.std()\n img_2 = dataset[pair_idxs[i][1]]['image'][0, :, :]\n img_2 = (img_2 - img_2.mean()) / img_2.std()\n \n plt.figure()\n plt.subplot(121)\n plt.imshow(img_1, clim=[-3, 3], cmap='bone')\n plt.title(title_fxn(df, pair_idxs[i][0]))\n plt.xticks([])\n plt.yticks([])\n plt.subplot(122)\n plt.imshow(img_2, clim=[-3, 3], cmap='bone')\n plt.title(title_fxn(df, pair_idxs[i][1]))\n plt.xticks([])\n plt.yticks([])\n plt.savefig(os.path.join(directory_to_save, 'pair_%i.png' % i), dpi=300)\n plt.show()",
"def getListDataPath(self, imgList): \n result = []\n for imgName in imgList:\n result.append(self.getDataPath(imgName))\n return result",
"def fetch_osg_abs_paths_picture(rawDataItemId):\n abs_paths = \"\"\n \n fetch_osg_abs_path_statement = 'select abs_path from osg_data_item natural join osg_data_item_picture where raw_data_item_id = %s'\n abs_paths,num = utils.fetchDataFromDB(cursor, fetch_osg_abs_path_statement, [rawDataItemId,],[], False)\n \n \n return abs_paths, num",
"def get_filenames(self):\n self.filenames = pd.read_csv('../image_names.csv',header=None)[0] #Only one column, hence [0] loads all filenames in self.filenames\n\tself.im2cap = pickle.load(open('img_to_cap.pkl','r'))\n self.nImgs = len(self.filenames)",
"def get_image_paths_by_views(self, exam: dict, img_dir: str):\n\n def get_view(view_name: str, img_dir: str):\n image_paths_w_view = [(view, os.path.join(img_dir, image_path)) for view, image_path in zip(exam['views'], exam['files']) if view.startswith(view_name)]\n\n image_paths_w_view = image_paths_w_view[:1]\n image_paths = [path for _ , path in image_paths_w_view]\n return image_paths\n\n left_ccs = get_view('L CC', img_dir)\n left_mlos = get_view('L MLO', img_dir)\n right_ccs = get_view('R CC', img_dir)\n right_mlos = get_view('R MLO', img_dir)\n\n return left_ccs, left_mlos, right_ccs, right_mlos",
"def _load_split_img_names(self):\n assert self.split in (\"train\", \"gallery\")\n if self.split == \"train\":\n imgs = loadmat(osp.join(self.root, \"frame_train.mat\"))[\"img_index_train\"]\n else:\n imgs = loadmat(osp.join(self.root, \"frame_test.mat\"))[\"img_index_test\"]\n return [img[0][0] + \".jpg\" for img in imgs]",
"def _pair_dicom_and_contour(self):\n pid_oid = pd.read_csv(self.link_fn)\n pid_oid['file_id'] = pid_oid.apply(\n lambda row: list(set(self._get_all_dicom_ids(\n row['patient_id'])).intersection(set(self._get_all_contour_ids(\n row['original_id'])))), axis=1)\n return pid_oid",
"def get_df():\n im1 = [] # there are 2 images associated with a report\n im2 = []\n # stores info\n comparisons = []\n indications = []\n findings = []\n impressions = []\n report = [] # stores xml file name\n for file in tqdm(os.listdir(reports_folder)):\n report_file = os.path.join(reports_folder, file)\n with open(report_file, 'r') as f: # reading the xml data\n data = f.read()\n\n regex = r\"parentImage id.*\" # getting all the image names\n k = re.findall(regex, data)\n\n if len(k) == 2:\n regex = r\"\\\".*\\\"\" # getting the name\n image1 = re.findall(regex, k[0])[0]\n image2 = re.findall(regex, k[1])[0]\n\n image1 = re.sub(r\"\\\"\", \"\", image1)\n image2 = re.sub(r\"\\\"\", \"\", image2)\n\n image1 = image1.strip() + \".png\"\n image2 = image2.strip() + \".png\"\n im1.append(image1)\n im2.append(image2)\n\n comparison, indication, finding, impression = get_final(data)\n comparisons.append(comparison)\n indications.append(indication)\n findings.append(finding)\n impressions.append(impression)\n report.append(file) # xml file name\n\n elif len(k) < 2: # 如果一份报告链接的图片少于两张\n regex = r\"\\\".*\\\"\" # getting the name\n try: # if the exception is raised means no image file name was found\n image1 = re.findall(regex, k[0])[0]\n image1 = re.sub(r\"\\\"\", \"\", image1) # removing \"\n image2 = np.nan\n\n image1 = image1.strip() + \".png\"\n except:\n image1 = np.nan\n image2 = np.nan\n\n im1.append(image1)\n im2.append(image2)\n comparison, indication, finding, impression = get_final(data)\n comparisons.append(comparison)\n indications.append(indication)\n findings.append(finding)\n impressions.append(impression)\n report.append(file) # xml file name\n\n # if there are more than 2 images concerned with report\n # creat new datapoint with new image and same info\n else:\n comparison, indication, finding, impression = get_final(data)\n\n for i in range(len(k) - 1):\n regex = r\"\\\".*\\\"\" # getting the name\n image1 = re.findall(regex, k[i])[0] # re.findall returns a list\n image2 = re.findall(regex, k[i + 1])[0]\n\n image1 = re.sub(r\"\\\"\", \"\", image1) # removing \"\n image2 = re.sub(r\"\\\"\", \"\", image2) # removing \"\n\n image1 = image1.strip() + \".png\"\n image2 = image2.strip() + \".png\"\n\n im1.append(image1)\n im2.append(image2)\n comparisons.append(comparison)\n indications.append(indication)\n findings.append(finding)\n impressions.append(impression)\n report.append(file) # xml file name\n\n df = pd.DataFrame(\n {\"image_1\": im1, \"image_2\": im2, \"comparison\": comparisons, \"indication\": indications, \"findings\": findings,\n \"impression\": impressions, \"xml file name\": report})\n return df",
"def make_image_list(image_dir):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
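`get_list_image_file` above keys its dictionary by the integer file name, so two files with the same image id in different folders collapse to a single row (the last path seen wins). A self-contained sketch of that deduplication with hypothetical paths; no files need to exist, since only the names are used.

```python
import os
import pandas as pd

# Hypothetical paths: two different folders contain a file named 101.jpg.
paths = ['data/train/101.jpg', 'data/extra/101.jpg', 'data/train/202.jpg']

dictionary = {}
for filename in paths:
    image_id = int(os.path.basename(filename).replace('.jpg', ''))
    dictionary[image_id] = os.path.abspath(filename)  # later paths overwrite earlier ones

df_filepath = pd.DataFrame(list(dictionary.items()),
                           columns=['_image_id', '_image_path'])
# Two rows remain: image id 101 (pointing at data/extra) and image id 202.
```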
Reshapes the Keras model weights as a vector. | def model_weights_as_vector(model):
weights_vector = []
for layer in model.layers: # model.get_weights():
if layer.trainable:
layer_weights = layer.get_weights()
for l_weights in layer_weights:
vector = numpy.reshape(l_weights, newshape=(l_weights.size))
weights_vector.extend(vector)
return numpy.array(weights_vector) | [
"def flatten_weights(model, numpy_output: bool = True):\n all_params = []\n for param in model.parameters():\n all_params.append(param.view(-1))\n all_params = torch.cat(all_params)\n if numpy_output:\n return all_params.cpu().detach().numpy()\n return all_params",
"def _reshape_weights(X, sample_weight, device='cpu'):\n\n\tif sample_weight is None:\n\t\tif torch.is_floating_point(X):\n\t\t\tsample_weight = torch.ones(1, device=device, \n\t\t\t\tdtype=X.dtype).expand(*X.shape)\n\t\telse:\n\t\t\tsample_weight = torch.ones(1, device=device, \n\t\t\t\tdtype=torch.float32).expand(*X.shape)\n\telse:\n\t\tif not torch.is_floating_point(sample_weight):\n\t\t\tsample_weight = sample_weight.type(torch.float32)\n\n\n\tif len(sample_weight.shape) == 1: \n\t\tsample_weight = sample_weight.reshape(-1, 1).expand(-1, X.shape[1])\n\t\t_check_parameter(sample_weight, \"sample_weight\", min_value=0)\n\n\telif sample_weight.shape[1] == 1:\n\t\tsample_weight = sample_weight.expand(-1, X.shape[1])\n\t\t_check_parameter(sample_weight, \"sample_weight\", min_value=0)\n\n\tif isinstance(X, torch.masked.MaskedTensor):\n\t\tif not isinstance(sample_weight, torch.masked.MaskedTensor):\n\t\t\tsample_weight = torch.masked.MaskedTensor(sample_weight, \n\t\t\t\tmask=X._masked_mask)\n\n\t_check_parameter(sample_weight, \"sample_weight\", shape=X.shape, \n\t\tndim=X.ndim)\n\treturn sample_weight",
"def normalize_weight(w):\n return w.numpy() / np.linalg.norm(w.numpy())",
"def getWeight(self):\n return np.concatenate([self.weight.ravel()] * 4)",
"def weight(self):\n vec = np.array([[reqt.weight for reqt in self.requirements]])\n return vec.T # Return as column vector",
"def normalize_weights(self):\n\n w = tf.reshape(self.w, [-1, self.w_shape[-1]])\n u = self.u\n\n with tf.name_scope(\"spectral_normalize\"):\n for _ in range(self.power_iterations):\n v = tf.math.l2_normalize(tf.matmul(u, tf.transpose(w)))\n u = tf.math.l2_normalize(tf.matmul(v, w))\n\n sigma = tf.matmul(tf.matmul(v, w), tf.transpose(u))\n\n self.w.assign(self.w / sigma)\n self.u.assign(u)",
"def normalize_weights(self):\n\n w = tf.reshape(self.w, [-1, self.w_shape[-1]])\n u = self.u\n\n with tf.name_scope(\"spectral_normalize\"):\n for _ in range(self.power_iterations):\n v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True))\n u = tf.math.l2_normalize(tf.matmul(v, w))\n\n sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)\n\n self.w.assign(self.w / sigma)\n self.u.assign(u)",
"def _reshape(self, x: torch.FloatTensor) -> torch.FloatTensor:\n new_shape = x.size()[:-1] + (self.n_graph, self.hidden_size)\n x = x.view(*new_shape)\n\n return x.permute(0, 2, 1, 3)",
"def change_layout(model_input):\n model_input = model_input.transpose((2, 0, 1))\n model_input = np.expand_dims(model_input, axis=0)\n return model_input",
"def update_kernel_weights(self):\n\n kernel_weights = []\n return_weights = None\n\n layer = self.rc.train_layer\n tensor_prev = self.model.tensors[layer-1]\n tensor = self.model.tensors[layer]\n neurons_post = tensor[2] / tensor_prev[2]\n\n if self.rc.train_layer:\n\n got = self._projections_stdp.get('weight', 'array')\n # reshape\n for kernel in range(neurons_post):\n kern = got[:, kernel]\n kernel_weights.append(kern)\n\n return_weights = np.array(kernel_weights) # shape e.g. (4,64)\n self.w_layer[layer] = np.array(kernel_weights)\n\n try:\n output = MODEL_PATH+\"w_layer_{}\".format(layer)\n number = self.model.layers[layer].kernels\n w = self.w_layer[layer].reshape((number,-1))\n np.savetxt(output, w, delimiter=',')\n except:\n self.rc.logging.error(\"Could not save weights of Layer {} to model/\".format(layer))\n\n else:\n self.rc.logging.error(\"Weights not received\")\n\n return return_weights",
"def _canonical_weight_shape(self, layer):\n if layer < 0 or layer >= self._num_layers:\n raise ValueError(\"\\'layer\\' is not valid, got %s, expecting [%d, %d]\" %\n (layer, 0, self._num_layers-1))\n if not self._input_size:\n raise RuntimeError(\n \"%s._canonical_weight_shape invoked before input shape is known\" %\n type(self).__name__)\n\n input_size = self._input_size\n num_units = self._num_units\n num_gates = self._num_params_per_layer // 2\n is_bidi = self._direction == CUDNN_RNN_BIDIRECTION\n\n if layer == 0:\n wts_applied_on_inputs = [(num_units, input_size)] * num_gates\n else:\n if is_bidi:\n wts_applied_on_inputs = [(num_units, 2 * num_units)] * num_gates\n else:\n wts_applied_on_inputs = [(num_units, num_units)] * num_gates\n wts_applied_on_hidden_states = [(num_units, num_units)] * num_gates\n tf_wts = wts_applied_on_inputs + wts_applied_on_hidden_states\n return tf_wts if not is_bidi else tf_wts * 2",
"def flatten(x):\n return tf.reshape(x, [-1])",
"def weights(self) -> np.ndarray:\n self._check_fitted()\n return np.asarray(self._fit_result.x)",
"def canonical_weight_shapes(self):\n if not self._input_size:\n raise RuntimeError(\n \"%s.canonical_weight_shapes invoked before input shape is known\" %\n type(self).__name__)\n\n shapes = []\n for i in range(self._num_layers):\n shapes.extend(self._canonical_weight_shape(i))\n return shapes",
"def windows(self) :\n w_data = self.vector[:-1]\n w_edge = self.window_edge\n return w_data.reshape( (len(self.CHANNELS),w_edge,w_edge) )",
"def get_weight(self):\n return self.graph_weights.reshape(self.size_graph_rows, self.size_graph_cols)",
"def preprocess_input(self, x):\n return x.reshape(x.shape[0], *self.input_shape[1:])",
"def factorize_model(model, layer_idx):\n return factorize_weight(model, layer_idx)",
"def model_vectors(self, model_dm, model_dbow, X, size=300):\n vecs_dm = self.gensim_vectors(model_dm, X, size)\n vecs_dbow = self.gensim_vectors(model_dbow, X, size)\n\n vecs = np.hstack((vecs_dm, vecs_dbow))\n return vecs"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
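A hedged sketch of the inverse operation, slicing a flat vector back into arrays shaped like each trainable layer's weights. It assumes the same layer order and trainable-only filtering as `model_weights_as_vector` above; the name `model_weights_as_matrix` and the exact return structure are assumptions for illustration.

```python
import numpy

def model_weights_as_matrix(model, weights_vector):
    """Slice a flat weight vector back into arrays shaped like each trainable
    layer's weights, following the same order used when flattening."""
    weights_matrix = []
    start = 0
    for layer in model.layers:
        if not layer.trainable:
            continue
        layer_weights = []
        for l_weights in layer.get_weights():
            size = l_weights.size
            chunk = weights_vector[start:start + size]
            layer_weights.append(numpy.reshape(chunk, newshape=l_weights.shape))
            start += size
        weights_matrix.append(layer_weights)
    return weights_matrix
```

Each inner list can then be handed to the corresponding layer's standard `set_weights` call to load the solution back into the model.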
Creates the initial population of the genetic algorithm as a list of networks' weights (i.e. solutions). Each element in the list holds a different set of weights for the Keras model. The method returns a list holding the weights of all solutions. | def create_population(self):
model_weights_vector = model_weights_as_vector(model=self.model)
net_population_weights = []
net_population_weights.append(model_weights_vector)
for idx in range(self.num_solutions-1):
net_weights = copy.deepcopy(model_weights_vector)
net_weights = numpy.array(net_weights) + numpy.random.uniform(low=-1.0, high=1.0, size=model_weights_vector.size)
# Appending the weights to the population.
net_population_weights.append(net_weights)
return net_population_weights | [
"def compute_weights(self) -> list:\n weights = []\n for num in self.population:\n # Our purpose: find x with fitness value near 0 as much as possible\n # So if abs(x) is large, negative of it (weight) will be small\n weights.append(0 - abs(self.equation(num+self.offset))) # abs to find x near 0\n return weights",
"def _create_all_weights(self, var_list):\n\n _ = self.iterations\n self._create_hypers()\n self._create_slots(var_list)",
"def init_weights(self):\r\n if self.init_seed:\r\n np.random.seed(self.init_seed)\r\n\r\n weights_list = []\r\n biases_list = []\r\n\r\n for layer in range(self.nb_layers):\r\n new_W = np.random.randn(self.K_list[layer], self.K_list[layer + 1])\r\n new_b = np.zeros(self.K_list[layer + 1])\r\n weights_list.append(new_W)\r\n biases_list.append(new_b)\r\n\r\n self.weights_list = weights_list\r\n self.biases_list = biases_list",
"def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]",
"def create_population(self, count):\n\n pop = []\n for _ in range(count):\n # Create a random network.\n network = Network(self.nn_param_choices)\n network.create_random()\n\n # Add the network to our population.\n pop.append(network)\n\n return pop",
"def init_weights(self):\r\n self.weights = [0 for i in range(len(self.inputs[0][0]))]",
"def create_initial_solutions(self) -> List[FloatSolution]:\n population = [self.problem.create_solution() for x in range(self.each_species_size * self.no_species)]\n\n for i in range(len(population)):\n population[i].species_index = i % self.no_species\n \n return population",
"def randInitializeWeights(layers):\n # numbers in each layer\n nel = (layers[:-1]+1)*layers[1:]\n nel = nel.astype('int')\n \n # the init apmlitudes for each layer\n epsilon_init = np.repeat(efun(layers[:-1], layers[1:]),nel)\n \n # the init weights for each neuron\n w = (2*np.random.uniform(size = sum(nel))-1)*epsilon_init\n \n return w",
"def _generate_weights(self, models: List[nn.Module]) -> List[Tensor]:\r\n X = []\r\n for model in models:\r\n X.append(self._generate_coords(model))\r\n\r\n return X",
"def get_weights(self, ):\n return [w for l in self.weights for w in l.flat]",
"def initialize_weights(self,seed=None):\r\n if seed != None: # using seed to initialize the weights if the seed is given\r\n np.random.seed(seed)\r\n\r\n self.weights=[] \r\n self.weights=np.random.randn(self.number_of_nodes,self.input_dimensions+1) #initialize the weights using random number\r\n return None",
"def generate_weights(nr_neurons, syn_placement_uniform, \\\n l23_5_weights_e, l23_5_weights_i, l23_5_weights_std, dendrites):\n weights = []\n\n prob_length = np.array([section.L for section in dendrites])\n prob_length = prob_length / sum(prob_length)\n\n for stype in ['AMPA', 'NMDA', 'GABAa', 'GABAb']:\n\n nr_neurons_type = nr_neurons[stype]\n for _ in range(nr_neurons_type):\n\n weight = {}\n weight['name'] = stype\n weight['loc'] = np.random.choice(range(len(dendrites)), p=prob_length)\n weight['loc_name'] = dendrites[weight['loc']].name()\n weight['place'] = np.random.uniform(syn_placement_uniform[0], syn_placement_uniform[1])\n\n if stype in ['AMPA', 'NMDA']:\n # generate excitatory synaptic weight\n l23_5weight = np.random.lognormal(l23_5_weights_e, l23_5_weights_std)\n else:\n # generate inhibitory synaptic weight\n l23_5weight = np.random.lognormal(l23_5_weights_i, l23_5_weights_std)\n\n weight['weight'] = l23_5weight\n weights.append(weight)\n\n return weights",
"def _initialize_weights(self):\n self.weights = np.random.randn(self.number_of_classes,self.input_dimensions+1)",
"def build_weights(self) -> Dict[object, float]:\n self.build()\n\n self._weights = [np.random.rand() for x in range(0, self.n)]\n return dict(zip(self.build_property, self.weights))",
"def generate(generations, population, nn_param_choices, dataset):\n optimizer = Optimizer(nn_param_choices)\n networks = optimizer.create_population(population)\n # Evolve the generation.\n for i in range(generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, generations))\n # Train and get accuracy for networks.\n train_networks(networks)\n\n # Evolve, except on the last iteration.\n if i != generations - 1:\n # Do the evolution.\n networks = optimizer.evolve(networks)\n print(networks)\n\n # Sort our final population.\n networks = sorted(networks, key=lambda x: x.loss, reverse=False)\n\n # Print out the top 5 networks.\n print_networks(networks[:5])",
"def init_weights(self):\r\n default_init_weights(self, 1)",
"def initialize_individual():\r\n individual = []\r\n for bound_region in bounds:\r\n last_weight = bound_region[-1][1]+1\r\n while(last_weight<bound_region[-1][0] or last_weight>bound_region[-1][1]):\r\n weights = [np.random.uniform(bound[0], bound[1], 1)[0] for bound in bound_region[:-1]]\r\n last_weight = 1-sum(weights)\r\n weights.append(last_weight)\r\n individual.append(weights)\r\n return individual",
"def weights ( self ) :\n N = len ( self ) \n return array ( 'd' , ( self.weight ( i ) for i in range ( N ) ) )",
"def generate_parameters(tune_weights, weight_lower, weight_upper, weight_step,\n tc_lower, tc_upper, tc_step,\n pop_lower, pop_upper, pop_step,\n cp_lower, cp_upper, cp_step,\n mp_lower, mp_upper, mp_step,\n ts_lower, ts_upper, ts_step,\n param_sets_numb):\n\n parameter_sets = [] # list of param_sets_numb parameter sets\n for i in range(0, param_sets_numb):\n if tune_weights: # if weight tuning is on\n weight = random.randrange(weight_lower, weight_upper + 1, weight_step) / 100 # generate weight\n else:\n weight = weight_lower\n tc = random.randrange(tc_lower, tc_upper + 1, tc_step) # iterations\n pop = random.randrange(pop_lower, pop_upper+1, pop_step) # population size\n cp = random.randrange(cp_lower, cp_upper+1, cp_step) / 100 # crossover probability\n mp = random.randrange(mp_lower, mp_upper+1, mp_step) / 100 # mutation probability\n ts = random.randrange(ts_lower, ts_upper+1, ts_step) / 100 # tournament size\n while [tc, pop, cp, mp, ts] in parameter_sets: # if parameter set is already listed\n if tune_weights:\n weight = random.randrange(weight_lower, weight_upper + 1, weight_step) / 100\n tc = random.randrange(tc_lower, tc_upper + 1, tc_step)\n pop = random.randrange(pop_lower, pop_upper + 1, pop_step)\n cp = random.randrange(cp_lower, cp_upper + 1, cp_step) / 100\n mp = random.randrange(mp_lower, mp_upper + 1, mp_step) / 100\n ts = random.randrange(ts_lower, ts_upper + 1, ts_step) / 100\n parameter_sets.append([weight, tc, pop, cp, mp, ts]) # add set to list\n\n return parameter_sets",
"def get_weights(self):\n\n weights = []\n for layer in self.NN:\n for node in layer:\n for weight in node.weights:\n weights.append(weight)\n return weights"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
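A standalone sketch of the population initialisation pattern used by `create_population` above: the first solution is the model's current flattened weights, and the remaining solutions are uniform random perturbations of it. Here `base_weights` stands in for `model_weights_as_vector(model)`; the sizes are arbitrary.

```python
import copy
import numpy

# 'base_weights' stands in for model_weights_as_vector(model); 10 parameters here.
base_weights = numpy.zeros(10)
num_solutions = 5

population = [base_weights]
for _ in range(num_solutions - 1):
    perturbed = numpy.array(copy.deepcopy(base_weights)) \
                + numpy.random.uniform(low=-1.0, high=1.0, size=base_weights.size)
    population.append(perturbed)

# population[0] is the untouched weight vector; population[1:] are noisy copies.
```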
Class which sets up the Google client and the environment. If "audio_length" is received (i.e. it is different from 0), that means the user is listening to an audio and we only care about "stop" and "end" commands. If by the time the audio finishes none of those commands have been said by the user, then the mic stops listening and the main program keeps executing. | def listen_command(self, keyword_bool, audio_length = 0):
# we calculate current time (approximate time at which the audio starts)
audio_init_time = time.time()
with MicrophoneStream.MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator(audio_init_time, audio_length)
requests = (types.StreamingRecognizeRequest(audio_content=content) for content in audio_generator)
#### Uncomment if this sound cue is to be implemented
# If we are ready to listen to the real commands because the keyword has already been heard, we give the user a cue with a sound.
# Commented out because there is now a VUI, so the user knows the mic listened thanks to the voiced answer.
# if (keyword_bool == False):
# time.sleep(0.1) # so the beep actually plays; it caused problems when run headless from the console only
# os.system('mpg321 -l 3 ../audioskorean/bips/beep-29.mp3 > /dev/null 2>&1 &')
responses = self.client.streaming_recognize(self.streaming_config, requests)
#function that print the results and exit if keyword appears
transcript = listen_print_loop(responses, self.language_code, keyword_bool, audio_length)
if (transcript):
command = transcript.replace(u" ",u"")
else:
command = ""
return command | [
"def start_audio(self):\n raylibpy.init_audio_device()",
"def initiate_Pepper(self):\n\n # starts the recognizer\n r = sr.Recognizer()\n\n with sr.Microphone() as source:\n\n while True:\n logger.debug(\"Awaiting user input.\")\n audio = r.listen(source)\n\n logger.debug(\"Interpreting user input.\")\n\n # Speech recognition using Google Speech Recognition\n try:\n result = r.recognize_google(audio)\n #result = r.recognize_sphinx(audio)\n\n self.handle_action(result)\n\n except sr.UnknownValueError:\n logger.debug(\"Could not understand audio\")\n #Pepper.speak(\"I'm sorry, but I couldn't understand what you said.\")\n except sr.RequestError as e:\n logger.warn(\"Could not request results from Google Speech Recognition service: %s\", e)\n except Exception as e:\n logger.error(\"Could not process text: %s\", e)",
"def start_listening(self):\n # obtain audio from the microphone\n recognizer = sr.Recognizer()\n with sr.Microphone() as source:\n PyElant.printv(self.verbose, \"Say something\")\n audio = recognizer.listen(source)\n\n # recognize speech using Google Speech Recognition\n try:\n self.text = recognizer.recognize_google(audio, language=self.input_language)\n PyElant.translate_text(self)\n except sr.UnknownValueError:\n PyElant.printv(self.verbose, \"Speech Recognition could not understand audio\")\n except sr.RequestError as error:\n PyElant.printv(self.verbose, \"Could not request results from Speech Recognition service; {0}\".format(error))",
"def __init__(self, cfg, commands, audio_in, asr_hypotheses_out, close_event):\n\n multiprocessing.Process.__init__(self)\n\n self.cfg = cfg\n self.commands = commands\n self.local_commands = deque()\n self.audio_in = audio_in\n self.local_audio_in = deque()\n self.asr_hypotheses_out = asr_hypotheses_out\n self.close_event = close_event\n\n # Load the ASR\n self.asr = asr_factory(cfg)\n\n self.system_logger = self.cfg['Logging']['system_logger']\n self.session_logger = self.cfg['Logging']['session_logger']\n\n self.recognition_on = False",
"def my_recognize_google_cloud(self, audio_data, credentials_json=None, language=\"en-US\", preferred_phrases=None,\n show_all=False):\n assert isinstance(audio_data, AudioData), \"``audio_data`` must be audio data\"\n assert isinstance(language, str), \"``language`` must be a string\"\n assert preferred_phrases is None or all(\n isinstance(preferred_phrases, (type(\"\"), type(u\"\"))) for preferred_phrases in\n preferred_phrases), \"``preferred_phrases`` must be a list of strings\"\n\n # See https://cloud.google.com/speech/reference/rest/v1/RecognitionConfig\n flac_data = audio_data.get_flac_data(\n convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)),\n # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range\n convert_width=2 # audio samples must be 16-bit\n )\n\n try:\n #from oauth2client.client import GoogleCredentials\n from googleapiclient.discovery import build\n import googleapiclient.errors\n import google.auth\n from google.oauth2 import service_account\n # cannot simply use 'http = httplib2.Http(timeout=self.operation_timeout)'\n # because discovery.build() says 'Arguments http and credentials are mutually exclusive'\n import socket\n import googleapiclient.http\n if self.operation_timeout and socket.getdefaulttimeout() is None:\n # override constant (used by googleapiclient.http.build_http())\n googleapiclient.http.DEFAULT_HTTP_TIMEOUT_SEC = self.operation_timeout\n\n if credentials_json is None:\n api_credentials = google.auth.default()\n else:\n api_credentials = service_account.Credentials.from_service_account_file(credentials_json)\n # the credentials can only be read from a file, so we'll make a temp file and write in the contents to work around that\n #with PortableNamedTemporaryFile(\"w\") as f:\n # f.write(credentials_json)\n # f.flush()\n # api_credentials = GoogleCredentials.from_stream(f.name)\n\n speech_service = build(\"speech\", \"v1\", credentials=api_credentials, cache_discovery=False)\n except ImportError:\n raise RequestError(\n \"missing google-api-python-client module: ensure that google-api-python-client is set up correctly.\")\n\n speech_config = {\"encoding\": \"FLAC\", \"sampleRateHertz\": audio_data.sample_rate, \"languageCode\": language}\n if preferred_phrases is not None:\n speech_config[\"speechContexts\"] = [{\"phrases\": preferred_phrases}]\n if show_all:\n speech_config[\"enableWordTimeOffsets\"] = True # some useful extra options for when we want all the output\n request = speech_service.speech().recognize(\n body={\"audio\": {\"content\": base64.b64encode(flac_data).decode(\"utf8\")}, \"config\": speech_config})\n\n try:\n response = request.execute()\n except googleapiclient.errors.HttpError as e:\n raise RequestError(e)\n except URLError as e:\n raise RequestError(\"recognition connection failed: {0}\".format(e.reason))\n\n if show_all: return response\n if \"results\" not in response or len(response[\"results\"]) == 0: raise UnknownValueError()\n transcript = \"\"\n averageConfidence = 0\n numberOfTranscripts = 0\n for result in response[\"results\"]:\n transcript += result[\"alternatives\"][0][\"transcript\"].strip() + \" \"\n averageConfidence += result[\"alternatives\"][0][\"confidence\"]\n numberOfTranscripts += 1\n\n averageConfidence /= numberOfTranscripts\n return {\n 'transcript': transcript,\n 'confidence': averageConfidence\n }",
"def processCommand():\r\n command = source1.Recognizer()\r\n with source1.Microphone() as source:\r\n print(\"Listening...\")\r\n command.pause_threshold = 1.5\r\n audio = command.listen(source)\r\n\r\n # Executing Query\r\n try:\r\n print('Recognizing...')\r\n query = command.recognize_google(audio, language='en-us')\r\n print(f\"You said; {query}\\n\")\r\n\r\n except Exception as error:\r\n return 'None'\r\n\r\n return query",
"def take_input(self):\n with sr.Microphone() as source:\n print(\"Listening...\")\n audio = self.voice_recogniser.listen(source)\n print(\"Recognising...\")\n\n try:\n audio_input = self.voice_recogniser.recognize_google(audio)\n print(\"Heard: \\\"{}\\\"\".format(audio_input))\n return audio_input.lower()\n\n except sr.UnknownValueError:\n print(\"Could not understand audio.\")\n return False\n\n except sr.RequestError as e:\n print(\n \"Could not request results from Google Speech Recognition service; {0}\".format(e))\n return False\n\n except KeyError:\n return False",
"def listen_for_speech(self , num_phrases = -1):\n\n\t\t#Open stream\n\t\tself.s = connection.Connection()\n\t\t\n\t\tsilence_thread = Thread(target=self.silence_analysys)\n\t\tsilence_thread.start()\n\n\t\tp = pyaudio.PyAudio()\n\n\t\tstream = p.open(format = self.FORMAT,\n\t\t\t\t\t\tchannels = self.CHANNELS,\n\t\t\t\t\t\trate = self.RATE,\n\t\t\t\t\t\tinput = True,\n\t\t\t\t\t\tframes_per_buffer = self.CHUNK,\n\t\t\t\t\t\tstream_callback = self.callback)\n\t\ttry:\n\n\t\t\tstream.start_stream()\n\n\t\t\twhile stream.is_active():\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\n\t\texcept KeyboardInterrupt:\n\t\t\tself.log.debug(\"INTERRUPTED BY USER: Finished\")\n\t\texcept Exception as e:\n\t\t\tself.log.debug('ERROR: ' + str(e))\n\t\tfinally:\n\t\t\tself.audioQueue.put(None)\n\t\t\tself.s.destroy()\n\t\t\tself.log.info(\"* Done recording\")\n\n\t\t\tstream.stop_stream()\n\t\t\tstream.close()\n\t\t\tp.terminate()",
"def __init__(self, bot):\n self.user = None # This lets us check music permissions for a given user\n self.server = None # Info on a given server\n\n self.bot = bot # Lets us use the bot in various other parts of the bot to access information like the voice state of the bot\n self.players = {} # Each servers' music player is stored here so we can get information on the current song, pause, play, ect while being server exclusive\n self.queues = {} # Each servers' Queue object is stored here\n self.play_status = {} # Stores information on if the server will play music or not, changed with any play commands and the stop command\n\n self.config = json.load(open(self.bot.home_dir + '/config/config.json'))\n\n self.FailEmbed = int(self.config[\"Embeds Colors\"][\"Fail Embed\"], 16)\n self.SuccessEmbed = int(self.config[\"Embeds Colors\"][\"Success Embed\"], 16)\n self.VoteEmbed = int(self.config[\"Embeds Colors\"][\"Vote Embed\"], 16)",
"def _SetupAndStart(self):\n self._SetupEnvVars()\n\n # Sometimes goma is lingering around if something went bad on a previous\n # run. Stop it before starting a new process. Can ignore the return code\n # since it will return an error if it wasn't running.\n self._Stop()\n\n if subprocess.call([self._abs_path_to_goma_file, 'start']):\n raise RuntimeError('GOMA failed to start.')",
"def main(args):\n # Check if the output directory exists.\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # List all audio files in the bucket.\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(gcloud.config.BUCKET_NAME)\n blobs = bucket.list_blobs()\n # `blobs` is a list of Google blob objects. We need to extract filenames.\n original_filenames = [b.name for b in blobs]\n\n # Create a single Google API client and configuration to reuse.\n # For a list of configuration options, see the Google Speech API documentation:\n # https://cloud.google.com/speech-to-text/docs/word-confidence\n client = speech.SpeechClient()\n rc = speech.types.RecognitionConfig(\n encoding=speech.enums.RecognitionConfig.AudioEncoding.FLAC,\n sample_rate_hertz=16000,\n language_code=\"en-US\",\n enable_word_confidence=True,\n enable_word_time_offsets=True,\n enable_speaker_diarization=True,\n diarization_speaker_count=2,\n model=\"video\",\n )\n\n # Skip already completed files.\n filenames: List[str] = []\n for filename in original_filenames:\n output_fqn = os.path.join(\n args.output_dir, filename.replace(\".flac\", \".json\")\n )\n if os.path.exists(output_fqn):\n continue\n else:\n filenames.append(filename)\n\n print(f\"Saving json output to: {args.output_dir}\")\n print(\n f\"Transcribing {len(filenames)} files from bucket: {gcloud.config.BUCKET_NAME}\"\n )\n for filename in tqdm(filenames):\n # Run ASR.\n audio = speech.types.RecognitionAudio(\n uri=f\"gs://{gcloud.config.BUCKET_NAME}/{filename}\"\n )\n ret = transcribe(client, rc, audio)\n\n # Save the output to json.\n with open(output_fqn, \"w\") as pointer:\n json.dump(ret, pointer, indent=2, separators=(\",\", \": \"))",
"def on_startup(conf: Mapping):\n\n async def startup_handler(app):\n \"\"\"Run all initialization tasks.\n\n These are tasks that should be run after the event loop has been started but before the HTTP\n server has been started.\n \"\"\"\n # Pull configurations\n joke_url = conf[\"jokes_external_api_url\"]\n telnyx_api_key = conf[\"telnyx_api_key\"]\n telnyx_connection_id = conf[\"telnyx_connection_id\"]\n src_number = conf[\"src_number\"]\n\n # Setup client session\n client_session = aiohttp.ClientSession()\n\n # Setup Telnyx settings\n telnyx.api_key = telnyx_api_key\n\n # Setup the jinga template for the front-end webpage\n templates_dir = Path(conf[\"templates_dir\"]).resolve()\n aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(str(templates_dir)))\n\n # Setup the Call Control App\n call_control_app = CallControl(\n client_session, telnyx, telnyx_connection_id, joke_url, src_number\n )\n\n # Setup the Call Schedule\n call_scheduler = UnifiedTimedQueue(call_control_app)\n\n # Register App dependencies\n # These will be accessible via the Request object\n app[constants.SCHEDULER] = call_scheduler\n app[constants.TELNYX] = telnyx\n app[constants.CALL_CONTROL_APP] = call_control_app\n\n # Define required cleanup\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n await client_session.close()\n\n app.on_shutdown.append(cleanup)\n\n return startup_handler",
"def input_command():\r\n\r\n r = sr.Recognizer()\r\n m = sr.Microphone()\r\n with m as source:\r\n r.adjust_for_ambient_noise(source)\r\n with m as source:\r\n print('listening...')\r\n audio = r.listen(source)\r\n\r\n try:\r\n print('recognizing...')\r\n query = r.recognize_google(audio)\r\n print(f\"\\\"{query}\\\"\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"Sorry!! Didn't catch that\")\r\n query = input_command()\r\n return query",
"def __init__(self, application):\r\n sys.path.append('.')\r\n\r\n # create a store of services\r\n self.services = service.IServiceCollection(application)\r\n self.amp_protocol = None # set by amp factory\r\n self.sessions = SESSIONS\r\n self.sessions.server = self\r\n\r\n # Database-specific startup optimizations.\r\n self.sqlite3_prep()\r\n\r\n # Run the initial setup if needed\r\n self.run_initial_setup()\r\n\r\n self.start_time = time.time()\r\n\r\n # initialize channelhandler\r\n channelhandler.CHANNELHANDLER.update()\r\n\r\n # set a callback if the server is killed abruptly,\r\n # by Ctrl-C, reboot etc.\r\n reactor.addSystemEventTrigger('before', 'shutdown',\r\n self.shutdown, _reactor_stopping=True)\r\n\r\n self.game_running = True\r\n\r\n self.run_init_hooks()",
"def main():\n play_to_client(\"flight_number_ask.wav\")\n check_valid_flight_number(speech_to_text(\"passenger_flight_number.wav\"))\n record_flight_number(\"passenger_flight_number.wav\")\n make_csv()\n print(analyze())\n subprocess.call(\"node firebase.js\")\n negative_response = make_csv().split(\";\")[-1] <= 5\n if negative_response:\n twilio_call(\"+12674750425\", \"+12674607556\",\n \"Thank you for your feedback.\\n We are sorry you had this issue.\"\n \"We value our customers and are looking into this matter.\\n\"\n \"Thank you! \\n\")\n else:\n twilio_call(\"+12674750425\", \"+12674607556\",\n \"Thank you for your feedback!\\n We are glad that your experience was enjoyable.\"\n \"The people responsible will be thanked on our behalf\\n\"\n \"Thank you! \\n\")",
"def __init__(self):\r\n # prepare client\r\n self.client = MPDClient()\r\n self.client.timeout = None\r\n self.client.idletimeout = None\r\n self.client.connect(\"localhost\", 6600)\r\n\r\n # gather playlists\r\n self.playlists = [x[\"playlist\"] for x in self.client.listplaylists()]\r\n\r\n # gather songs\r\n self.client.clear()\r\n for playlist in self.playlists:\r\n self.client.load(playlist)\r\n\r\n soup = self.client.playlist()\r\n for i in range(0, len(soup) / 10):\r\n index = i * 10\r\n id = soup[index].strip()\r\n title = soup[index + 3].strip().upper()\r\n artist = soup[index + 2].strip().upper()\r\n album = soup[index + 4].strip().upper()\r\n\r\n self.songs.append(Song(id, title, artist, album))\r\n\r\n self.song_titles.append(title)\r\n self.song_artists.append(artist)",
"def check_music_settings(self):\n self.gstMusic = None\n self.gstSound = None\n if gst_media_imported:\n try: \n if self.music_enabled:\n self.gstMusic = gst_media.MusicPlayer()\n if self.sound_enabled:\n self.gstSound = gst_media.SoundPlayer()\n except:\n # Playbin object creation failed\n self.log_msg('Warning: Failed to create Music gstreamer objects','0')\n return\n # Check dir\n if not os.path.isdir(self.wahcade_ini.get('music_path')):\n self.log_msg('Error: Music Path [%s] does not exist' % (self.musicpath))\n return\n # Set dir\n tracks = self.gstMusic.set_directory(self.musicpath, MUSIC_FILESPEC)\n # Set volume\n self.gstMusic.set_volume(self.music_vol)\n # Play\n if len(tracks) > 0:\n self.gstMusic.load_playlist(\n playlist = tracks,\n play = True,\n shuffle = self.musicshuffle)",
"def initialize(self, vars):\n self.initvars(vars)\n self.parent_conn, child_conn = mp.Pipe()\n self.p = mp.Process(target=self.deviceloop, args=(child_conn,))\n self.p.start()",
"def test_no_play(self):\n text = \"This is a test.\"\n self.client.subscribe(TtsSay.topic())\n self.client.subscribe(AudioPlayBytes.topic(site_id=self.site_id))\n self.client.subscribe(TtsSayFinished.topic())\n self.client.subscribe(AudioToggleOff.topic())\n self.client.subscribe(AudioToggleOn.topic())\n\n response = requests.post(\n self.api_url(\"text-to-speech\"),\n data=text,\n params={\n \"siteId\": self.site_id,\n \"sessionId\": self.session_id,\n \"play\": \"false\",\n },\n )\n self.check_status(response)\n\n wav_data = response.content\n self.assertGreater(len(wav_data), 0)\n\n # Check audioServer/toggleOff\n audio_off_msg = self.mqtt_messages.get(timeout=5)\n self.assertTrue(AudioToggleOff.is_topic(audio_off_msg.topic))\n\n audio_off = AudioToggleOff.from_dict(json.loads(audio_off_msg.payload))\n self.assertEqual(audio_off.site_id, self.site_id)\n\n # Check tts/say\n tts_say_msg = self.mqtt_messages.get(timeout=5)\n self.assertTrue(TtsSay.is_topic(tts_say_msg.topic))\n\n tts_say = TtsSay.from_dict(json.loads(tts_say_msg.payload))\n self.assertEqual(tts_say.site_id, self.site_id)\n self.assertEqual(tts_say.session_id, self.session_id)\n self.assertEqual(tts_say.text, text)\n\n # Check audioServer/playBytes (will be ignored by audio output system)\n play_bytes_msg = self.mqtt_messages.get(timeout=5)\n self.assertTrue(AudioPlayBytes.is_topic(play_bytes_msg.topic))\n self.assertEqual(AudioPlayBytes.get_site_id(play_bytes_msg.topic), self.site_id)\n self.assertEqual(play_bytes_msg.payload, wav_data)\n\n # Check tts/sayFinished\n tts_finished_msg = self.mqtt_messages.get(timeout=5)\n self.assertTrue(TtsSayFinished.is_topic(tts_finished_msg.topic))\n\n tts_finished = TtsSayFinished.from_dict(json.loads(tts_finished_msg.payload))\n self.assertEqual(tts_finished.site_id, self.site_id)\n self.assertEqual(tts_finished.session_id, self.session_id)\n\n # Check audioServer/toggleOn\n audio_on_msg = self.mqtt_messages.get(timeout=5)\n self.assertTrue(AudioToggleOn.is_topic(audio_on_msg.topic))\n\n audio_on = AudioToggleOn.from_dict(json.loads(audio_on_msg.payload))\n self.assertEqual(audio_on.site_id, self.site_id)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a user-given position to a usable one. Position can be either a tuple of x and y or an int. | def _position(self, input_pos: (int, int) or int) -> (int, int):
if type(input_pos) == tuple:
return input_pos
if type(input_pos) == int:
return input_pos % self.width, input_pos // self.width
raise IndexError | [
"def convert_coordinates(coord):\n if len(coord) != 2:\n raise ValueError(\"Invalid coordinate %s\" % coord)\n column = ord(coord.lower()[0]) - 97\n if not 0 <= column <= 7:\n raise ValueError(\"Invalid coordinate %s\" % coord)\n try:\n row = int(coord[1]) - 1\n except ValueError:\n raise ValueError(\"Invalid coordinate %s\" % coord)\n if not 0 <= row <= 7:\n raise ValueError(\"Invalid coordinate %s\" % coord)\n return (column, row)",
"def position(self, value):\n if type(value) != tuple:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif len(value) != 2:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif type(value[0]) != int or type(value[1]) != int:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif len(value) != 2 or value[0] < 0 or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n self.__position = value",
"def pos_to_coord(pos):\n x, y = pos\n return \"%s%s\" % (string.letters[x], string.letters[y])",
"def __check_pos__(self, position):\n if (type(position) != tuple or len(position) != 2 or\n type(position[0]) != int or type(position[1]) != int or\n position[0] < 0 or position[1] < 0):\n # otherwise, raise a TypeError\n raise TypeError(\"position must be a tuple of 2 positive integers\")",
"def pos_to_coord(self, pos: int) -> (int, int):\n return pos // BOARD_DIM, pos % BOARD_DIM",
"def pos_to_tuple(self):\n return self.pos.x, self.pos.y",
"def _convertPositions(pos, func):\n try:\n # Check for PositionD or PositionI:\n if isinstance(pos,galsim.PositionD) or isinstance(pos,galsim.PositionI):\n return ( np.array([pos.x], dtype='float'),\n np.array([pos.y], dtype='float') )\n\n # Check for list of PositionD or PositionI:\n # The only other options allow pos[0], so if this is invalid, an exception \n # will be raised and appropriately dealt with:\n elif isinstance(pos[0],galsim.PositionD) or isinstance(pos[0],galsim.PositionI):\n return ( np.array([p.x for p in pos], dtype='float'),\n np.array([p.y for p in pos], dtype='float') )\n\n # Now pos must be a tuple of length 2\n elif len(pos) != 2:\n raise TypeError() # This will be caught below and raised with a better error msg.\n\n # Check for (x,y):\n elif isinstance(pos[0],float):\n return ( np.array([pos[0]], dtype='float'),\n np.array([pos[1]], dtype='float') )\n\n # Only other valid option is ( xlist , ylist )\n else:\n return ( np.array(pos[0], dtype='float'),\n np.array(pos[1], dtype='float') )\n\n except:\n raise TypeError(\"Unable to parse the input pos argument for %s.\"%func)",
"def pos2Coord(pos):\n # transform a solution value into a 2 components position\n return (self._shuffle[pos] % self._width * 5,\n self._shuffle[pos] / self._height * 5)",
"def _xy(self, pos, update):\n x = pos[0] + update[0]\n y = pos[1] + update[1]\n assert 0 <= x < self.shape[0], f\"Coordinate x out of bound: {x}\"\n assert 0 <= y < self.shape[1], f\"Coordinate y out of bound: {y}\"\n return (x,y)",
"def get_location_of_play():\n location_string = input('enter the location of your move as a tuple')\n location_tuple = tuple(int(x) for x in location_string.split(\",\"))\n while location_tuple[0] > 3 or location_tuple[0] < 0 or location_tuple[1] > 3 or location_tuple[1] < 0:\n location_string = input(\n 'ERROR: location tuple has to be between [1,1] to [3,3], please enter new - valid location')\n location_tuple = tuple(int(x) for x in location_string.split(\",\"))\n return location_tuple",
"def pixel_to_position(self, pixel):\n x, y = pixel\n return y // LENGTH, x // LENGTH",
"def _string_location (self, point):\n\n #Remove brackets\n point = point.replace (\"(\", \"\")\n point = point.replace (\")\", \"\")\n\n #Parse the location string\n loc = point.split (',')\n\n #Returns tuple\n try:\n return (float (loc[1]), float (loc[0]))\n except IndexError:\n return ((0, 0))",
"def convert_coord(plot, axis, to_system, coord):\n to_system = int(to_system)\n from_system = 2 - to_system\n from_name = axis.upper() + (\"2\" if from_system == 2 else \"\")\n to_name = axis.upper() + (\"2\" if to_system == 2 else \"\")\n from_min = get_var(plot, \"GPVAL_%s_MIN\" % from_name, float)\n from_max = get_var(plot, \"GPVAL_%s_MAX\" % from_name, float)\n to_min = get_var(plot, \"GPVAL_%s_MIN\" % to_name, float)\n to_max = get_var(plot, \"GPVAL_%s_MAX\" % to_name, float)\n if None not in (from_min, from_max, to_min, to_max):\n return to_min + (to_max - to_min) * \\\n (coord - from_min) / (from_max - from_min)\n else:\n return None",
"def _coords(self, x, y):\n return y, x * 2",
"def _SouthPosition(self,position):\n return (position[0]+1,position[1])",
"def __create_coord(self, position, representation, args):\n if len(position) < 2:\n raise CoordinateError(\"You need at least two coordinates\")\n if representation == 'unitspherical':\n return self.__create_unitspherical_coord(position, args)\n elif representation == 'spherical':\n return self.__create_spherical_coord(position, args)\n elif representation == 'cartesian':\n return self.__create_cartesian_coord(position, args)\n else:\n raise RepresentationError(\"The representation {0} is not yet supported\".format(self.repr))",
"def position(self):\n\t\t\n\t\treturn tuple(self._position)",
"def position_to_field(pos, inverted):\n if inverted:\n row = int(pos[1] / 80)\n col = int((640 - pos[0]) / 80)\n else:\n row = int((640 - pos[1]) / 80)\n col = int(pos[0] / 80)\n return (row, col)",
"def mov_pos_x_y(self, str, sx, sy):\n x,y = self.integerize(sx), self.integerize(sy)\n l = list(str)\n del l[x]\n l.insert(y, str[x])\n return ''.join(l)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
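
The `_position` helper in the row above folds a flat index into (x, y) coordinates using the matrix width, and passes tuples through untouched. A minimal, self-contained sketch of that conversion, assuming row-major order; the `Grid` name, its constructor, and the nested-list storage are illustrative and not the original class:

class Grid:
    def __init__(self, width, height):
        self.width = width
        self.height = height
        # Nested rows, addressed as cells[y][x].
        self.cells = [[None] * width for _ in range(height)]

    def _position(self, pos):
        # An (x, y) tuple passes through unchanged.
        if isinstance(pos, tuple):
            return pos
        # A flat index counts row by row: x is the remainder, y the quotient.
        if isinstance(pos, int):
            return pos % self.width, pos // self.width
        raise IndexError

g = Grid(5, 3)
assert g._position(7) == (2, 1)       # flat index 7 on a 5-wide grid: column 2, row 1
assert g._position((4, 0)) == (4, 0)  # tuples are returned as given
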
Maps evaluation of `self[position]` to the matrix. | def __getitem__(self, position: (int, int) or int):
x, y = self._position(position)
return self.__matrix[y][x] | [
"def _matrix_(self):\n return self.to_matrix()",
"def get_mapping_matrix(self):\r\n return self.matrix",
"def __setitem__(self, position: (int, int) or int, value):\n x, y = self._position(position)\n\n self.__matrix[y][x] = value",
"def __getitem__(self, index):\n return self.__matrix[index]",
"def map_mat(fn, mat):\n res_mat = Mat(mat.cols, mat.rows)\n for (x, y), v in mat:\n res_mat.set_cell(x, y, fn(v))\n return res_mat",
"def get_matrix(self):\n return self.mvp",
"def get_position(self, matrix):\n return v3.transform(matrix, self.position)",
"def global_to_local(self, pos):\n matrix = self.obj.matrix_world.inverted()\n return matrix * pos",
"def __mul__(self, matrix):",
"def __getitem__(self, i: 'int') -> \"SbMatrix const &\":\n return _coin.SoMFMatrix___getitem__(self, i)",
"def _compute_matrix_profile(self):\n raise NotImplementedError",
"def test_indexing(self, matrices):\n # Test __getitem__ functionality\n n = matrices.n\n square_mat = chap5.Matrix(matrices.square)\n assert square_mat[(0, 0)] == 1\n # Test __setitem__ functionality\n square_mat[(n-1, n-1)] = -99\n assert square_mat[(n-1, n-1)] == -99",
"def _get_matrix(self):\n for row in self.active_sheet.rows:\n row_container = []\n for cell in row:\n row_container.append(cell.value)\n self.matrix.append(tuple(row_container))",
"def __setitem__(self, *args):\n return _coin.SoMFMatrix___setitem__(self, *args)",
"def matrix_transform(coords, matrix):\n return ProjectiveTransform(matrix)(coords)",
"def __fill_pos_matrix(self) -> None:\n n = self.n\n pos_matrix = []\n for i in range(n**2):\n j = i % n\n x = 1.5 * j\n if i in self.sub_matrix[0]:\n x += 0.5\n y = np.sqrt(3)/2 * (n - (i // n) - 1)\n pos_matrix.append([x, y])\n\n self.pos_matrix = np.asarray(pos_matrix)",
"def mul(self, matrix):",
"def __setitem__(self, i: 'int', value: 'SbMatrix') -> \"void\":\n return _coin.SoMFMatrix___setitem__(self, i, value)",
"def get_inner_matrix(self):\n return self.matrix"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
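
A short sketch of how the `__getitem__` in the row above maps either index form onto a row-major nested list; the `Grid` class and its attribute names are assumptions for illustration:

class Grid:
    def __init__(self, width, height):
        self.width = width
        self._matrix = [[None] * width for _ in range(height)]

    def _position(self, pos):
        if isinstance(pos, tuple):
            return pos
        return pos % self.width, pos // self.width

    def __getitem__(self, pos):
        # Resolve the position once, then read from the nested list.
        x, y = self._position(pos)
        return self._matrix[y][x]

g = Grid(4, 2)
g._matrix[1][2] = "hit"
# Both addressing styles reach the same cell: (2, 1) and flat index 6.
assert g[(2, 1)] == "hit"
assert g[6] == "hit"
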
Maps assignment of `self[position]` to the matrix. | def __setitem__(self, position: (int, int) or int, value):
x, y = self._position(position)
self.__matrix[y][x] = value | [
"def get_mapping_matrix(self):\r\n return self.matrix",
"def __setitem__(self, *args):\n return _coin.SoMFMatrix___setitem__(self, *args)",
"def __getitem__(self, position: (int, int) or int):\n x, y = self._position(position)\n\n return self.__matrix[y][x]",
"def __setitem__(self, i: 'int', value: 'SbMatrix') -> \"void\":\n return _coin.SoMFMatrix___setitem__(self, i, value)",
"def _matrix_(self):\n return self.to_matrix()",
"def set_matrix(self, m):\n self.m = m",
"def assignTo(self, m, type=None): # real signature unknown; restored from __doc__\n pass",
"def global_to_local(self, pos):\n matrix = self.obj.matrix_world.inverted()\n return matrix * pos",
"def __setitem__(self, position, stack: Stack):\n row, column = position\n self._items[row][column] = stack",
"def SoBumpMapMatrixElement_set(state: 'SoState', node: 'SoNode') -> \"SbMatrix &\":\n return _coin.SoBumpMapMatrixElement_set(state, node)",
"def transform(self, matrix):\n for index, point in enumerate(transform_points(self.points, matrix)):\n self.points[index].x = point[0]\n self.points[index].y = point[1]\n self.points[index].z = point[2]",
"def __setitem__(self, *args):\n return _core.MatrixXdVec___setitem__(self, *args)",
"def __fill_pos_matrix(self) -> None:\n n = self.n\n pos_matrix = []\n for i in range(n**2):\n j = i % n\n x = 1.5 * j\n if i in self.sub_matrix[0]:\n x += 0.5\n y = np.sqrt(3)/2 * (n - (i // n) - 1)\n pos_matrix.append([x, y])\n\n self.pos_matrix = np.asarray(pos_matrix)",
"def __getitem__(self, index):\n return self.__matrix[index]",
"def map_to_cell(self, vertices, basis=None):\n raise NotImplementedError()",
"def set_cell(self, col, row, value):\n \n super(Vec, self).set_cell(col, row, value)\n if self._mat_parent:\n self._mat_parent[1](row, value)",
"def transform(self, matrix):\n point = transform_points([self.point], matrix)\n normal = transform_vectors([self.normal], matrix)\n self.point.x = point[0]\n self.point.y = point[1]\n self.point.z = point[2]\n self.normal.x = normal[0]\n self.normal.y = normal[1]\n self.normal.z = normal[2]",
"def populate(self):\n for allow, sources, sinks in self.constraints:\n for src in sources:\n for snk in sinks:\n self.matrix.set(\n self.src_to_row[src], # Row index\n self.sink_to_col[snk], # Column index\n (1 if allow else 0) # Value to set (1 -> allow)\n )",
"def get_position(self, matrix):\n return v3.transform(matrix, self.position)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
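
The assignment path mirrors the read path: `__setitem__` resolves the position first, then writes into the nested list. A hedged sketch under the same assumed `Grid` scaffold:

class Grid:
    def __init__(self, width, height):
        self.width = width
        self._matrix = [[None] * width for _ in range(height)]

    def _position(self, pos):
        if isinstance(pos, tuple):
            return pos
        return pos % self.width, pos // self.width

    def __setitem__(self, pos, value):
        # Resolve the position, then overwrite the target cell.
        x, y = self._position(pos)
        self._matrix[y][x] = value

g = Grid(3, 3)
g[4] = "centre"            # flat index 4 on a 3-wide grid lands at (1, 1)
g[(0, 2)] = "bottom-left"
assert g._matrix[1][1] == "centre"
assert g._matrix[2][0] == "bottom-left"
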
Maps deletion of `self[position]` to the matrix. | def __delitem__(self, position: (int, int) or int):
x, y = self._position(position)
self.__matrix[y][x] = None | [
"def remove(self, pos: vec2d):\n pos //= self.fulltile\n x, y = pos.ftup()\n self.array.delete(x, y)",
"def remove_tensor(self, position):\n target_indices = self.tensors[position].non_der_indices\n new_tensors = []\n for tensor in rest(self.tensors, position):\n new_indices = remove_indices(tensor.indices, target_indices)\n new_tensors.append(tensor.change_indices(new_indices))\n return Operator(new_tensors)",
"def name_mapping_delete(self, position, direction):\n return self.request( \"name-mapping-delete\", {\n 'position': [ position, 'position', [ int, 'None' ], False ],\n 'direction': [ direction, 'direction', [ basestring, 'name-mapping-direction' ], False ],\n }, {\n } )",
"def _submatrix(F, i):\n submat = np.copy(F)\n submat = np.delete(submat, i, axis=1)\n submat = np.delete(submat, i, axis=0)\n return submat",
"def free_cell(board, position):\n if position in board:\n if board[position] is True:\n del board[position]",
"def get_mapping_matrix(self):\r\n return self.matrix",
"def _matrix_(self):\n return self.to_matrix()",
"def _get_distance_matrix(self):\n\n # implement here",
"def draw(self, matrix, mario_pos):\r\n pass",
"def _distance_matrix(self):\n def dist(ii, jj):\n \"\"\"\n Calculates a distance between two points at indices ii and jj in\n the xy data matrix.\n ARGS:\n ii, jj (int): Indices\n \"\"\"\n return (sqrt((self.xy[0][ii] - self.xy[0][jj]) ** 2 + (self.xy[1][ii] - self.xy[1][jj]) ** 2))\n return np.array([np.array([dist(ii, jj) for jj in range(len(self.xy[0]))]) for ii in range(len(self.xy[0]))])",
"def unmark_position(self):\n idx = np.where((self.marked_positions[:, 0] == self.cur_i) &\n (self.marked_positions[:, 1] == self.cur_j))[0]\n if len(idx) > 0:\n self.marked_positions = np.delete(self.marked_positions, (idx[0]), axis=0)\n self.update_flags()",
"def delete(self, position=0):\n if not isinstance(position, int):\n raise TypeError\n if self.head is None:\n return None\n head = self.head\n if position > self.length - 1:\n position = self.length - 1\n if position <= 0:\n deleted = {\n 'data': head['data'],\n 'next': None,\n }\n head = head['next']\n self.head = head\n self.length -= 1\n return deleted\n current = head\n counter = 0\n while current.get('next') is not None and counter < position - 1:\n current = current.get('next')\n counter += 1\n deleted = {\n 'data': current.get('next').get('data'),\n 'next': None,\n }\n current['next'] = current.get('next').get('next')\n self.head = head\n self.length -= 1\n return deleted",
"def __getitem__(self, position: (int, int) or int):\n x, y = self._position(position)\n\n return self.__matrix[y][x]",
"def remove(self):\n self.map.remove(self)",
"def _delete(self):\n self._check_mode_is_write('delete column data')\n\n if hasattr(self, '_col'):\n del self._col\n\n super()._delete()\n\n # remove index if it exists\n self.delete_index()",
"def position_matrix(seq_len):\n x = torch.arange(0, seq_len).unsqueeze(-1).expand(seq_len, seq_len)\n return x.t() - x",
"def _update_to_delete(self):\n cur_ins = []\n insertions = []\n for npos,pos in enumerate(self.positions):\n pos.to_delete = False\n if pos.temp=='-' and pos.targ=='-':\n continue\n if pos.temp=='-' and not pos.do_not_delete:\n cur_ins.append(npos)\n else:\n if len(cur_ins)>self.max_ins_len:\n insertions.append(cur_ins)\n for nipos in cur_ins:\n self.positions[nipos].to_delete = True\n cur_ins = []\n if len(cur_ins)>self.max_ins_len:\n insertions.append(cur_ins)\n for nipos in cur_ins:\n self.positions[nipos].to_delete = True\n return insertions",
"def remove_col(\n matrix: np.matrix, j: int, index_by_1: bool = False\n) -> np.matrix:\n # Handle possible 1-indexing\n if index_by_1:\n j += 1\n\n return np.delete(matrix, (j), axis=1)",
"def remove_row(\n matrix: np.matrix, i: int, index_by_1: bool = False\n) -> np.matrix:\n # Handle possible 1-indexing\n if index_by_1:\n i += 1\n\n return np.delete(matrix, (i), axis=0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
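
Deletion in the row above is soft: the addressed cell is blanked rather than the grid being resized. A small sketch of that behaviour, again using an assumed `Grid` scaffold:

class Grid:
    def __init__(self, width, height):
        self.width = width
        self._matrix = [[None] * width for _ in range(height)]

    def _position(self, pos):
        if isinstance(pos, tuple):
            return pos
        return pos % self.width, pos // self.width

    def __delitem__(self, pos):
        # "Deletion" blanks the cell instead of shrinking the matrix.
        x, y = self._position(pos)
        self._matrix[y][x] = None

g = Grid(2, 2)
g._matrix[0][1] = "temp"
del g[(1, 0)]                 # (x=1, y=0) addresses _matrix[0][1]
assert g._matrix[0][1] is None
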
'Greater than' comparison between each value of the instance and an external value or the values of another Matrix. | def __gt__(self, other):
if isinstance(other, self.__class__):
result = self.__class__(self.width, self.height)
for index, value in enumerate(self):
result[index] = value > other[index]
return result
else:
for value in self:
if value <= other:
return False
return True | [
"def __gt__(self, other: 'MetricList') -> bool:\n self._compare_metrics(other)\n for name, metric in self.metrics.items():\n cmp = metric.better(self.values[name], other.values[name])\n if cmp is not None:\n return cmp\n return False",
"def __gt__(i1, i2):\n return i1.inf > i2.sup",
"def __gt__(self, other):\n return self.fitness < other.fitness",
"def __gt__(self, other):\n if self.suit == other.suit:\n self_value = self.value\n other_value = other.value\n\n if self.value == 1:\n self_value = 14\n if other.value == 1:\n other_value = 14\n return self_value > other_value\n else:\n return self.suit.value > other.suit.value",
"def test_greater_than(self):\n self.assertFalse(self.bond2 > self.bond1) # Because the sorting keys should be identical\n self.assertGreater(self.bond3, self.bond1)\n self.assertGreater(self.bond4, self.bond1)",
"def __gt__(self, rhs: Value):\n\t\treturn self.cmp(rhs) > 0",
"def test_greater_than(self):\n self.assertFalse(self.atom2 > self.atom1) # Because the sorting keys should be identical\n self.assertGreater(self.atom3, self.atom1)\n self.assertGreater(self.atom1, self.atom4)",
"def greater_equal(x1: ArrayOrScalar, x2: ArrayOrScalar) -> Union[Array, bool]:\n return _compare(x1, x2, \">=\")",
"def test_greater_than(self):\n self.assertFalse(self.mol2 > self.mol1) # Because the sorting keys should be identical\n self.assertGreater(self.mol3, self.mol1)",
"def isGreaterThan(self, columnA, columnB, assertion = is_one):\n function = jc.scala_function1(self.spark.sparkContext._gateway,\n assertion)\n jvmConstraint = self.jvmCheck.isGreaterThan(\n columnA,\n columnB,\n function,\n getattr(self.jvmCheck, \"isGreaterThan$default$4\")()\n )\n return Check(\n self.spark,\n self.level,\n self.description,\n jvmConstraint\n )",
"def vm_impl_greater(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n out = vm.greater(x, y)\n return Tensor(np.array(out))\n\n return vm_impl",
"def greater(x1: ArrayOrScalar, x2: ArrayOrScalar) -> Union[Array, bool]:\n return _compare(x1, x2, \">\")",
"def __gt__(self, other) -> bool:\n return self.magnitude > other.magnitude",
"def __gt__(self, other):\n return self._index > other._index or (self._index == other._index and self._ic > other._ic)",
"def __gt__(self, other: 'LikertQuestionGroup') -> Series:\n if set(self._item_dict.keys()) != set(other._item_dict.keys()):\n raise ValueError(\n 'Keys must be the same to compare LikertQuestionGroups'\n )\n results = []\n for key in self._item_dict.keys():\n results.append({\n 'name': key,\n 'p': self._item_dict[key] > other._item_dict[key]\n })\n return DataFrame(results).set_index('name')['p']",
"def __gt__(self, other):\n # student code goes here\n if self.rank > other.rank:\n return True\n return False",
"def __gt__(self, other):\n if Temperature(self.celsius > other.celsius) is True:\n return True\n else:\n return False",
"def __gt__(self, other):\n if Envelope._comparison(self, other, 'gt'):\n return True\n return False",
"def agtb(a, b):\n return matrix(list(map(lambda x, y: x > y, a, b)), a.size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
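
The `__gt__` in the row above has two modes: comparing against another matrix yields an element-wise boolean result, while comparing against a scalar collapses to a single boolean that is True only when every value exceeds it. A compact sketch of the two modes using flat lists; the function names are illustrative:

def elementwise_gt(values, other_values):
    # Matrix vs. matrix: compare position by position, keep a boolean per cell.
    return [a > b for a, b in zip(values, other_values)]

def all_gt(values, threshold):
    # Matrix vs. scalar: a single boolean, True only if every cell exceeds it.
    return all(v > threshold for v in values)

assert elementwise_gt([3, 1, 5], [2, 2, 2]) == [True, False, True]
assert all_gt([3, 4, 5], 2) is True
assert all_gt([3, 1, 5], 2) is False
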
Updates registration information of the peer with new values. | def register_update(self, port):
self.reg_times.append(time.time())
self.reg_date = datetime.datetime.now()
self.port = port
self.update() | [
"def update_registration():\n try:\n requests.post(\n f'http://{CATALOG_IP_PORT[\"ip\"]}:{CATALOG_IP_PORT[\"port\"]}/catalog/devices',\n data=UPDATE_BODY,\n headers={\"Content-Type\": \"application/json\"}\n )\n except requests.ConnectionError:\n pass",
"def update_info():\n\n update_channels_list()\n update_user_list()",
"def _on_register(self, ch, method, properties, message):\n message = json.loads(str(message, \"utf-8\"))\n pid = str(uuid.uuid4())\n # create a entry in our plugin database\n p = model.Plugin(\n uuid=pid,\n name=message.get(\"name\"),\n version=message.get(\"version\"),\n description=message.get(\"description\"),\n state=\"REGISTERED\"\n )\n p.save()\n LOG.info(\"REGISTERED: %r\" % p)\n # broadcast a plugin status update to the other plugin\n self.send_plugin_status_update()\n # return result\n response = {\n \"status\": \"OK\",\n \"name\": p.name,\n \"version\": p.version,\n \"description\": p.description,\n \"uuid\": pid,\n \"error\": None\n }\n return json.dumps(response)",
"def connectionMade(self):\r\n pkt = RegistrationRequestPacket()\r\n self.transport.write(pkt.to_binary())",
"def register_nodes():\n values = request.get_json()\n nodes = values.get('nodes') # get from the input of Postman\n if nodes is None:\n return \"Error: Please supply a valid list of nodes\", 400\n \n for node in nodes:\n my_wallet.peer_register(node)\n \n response = {\n 'message': 'New peer nodes have been added',\n 'total_nodes': list(my_wallet.peers),\n }\n return jsonify(response), 201",
"def push_peer_details(self, ip_address: tuple):\n self.registered_peer_list.append(ip_address)\n logging.debug(f\"{self.__class__.__name__} | push_peer_details | {ip_address}\")",
"def register(self, new_user: str, new_ip: str):\n\t\tself.__user[new_user] = new_ip\n\t\tself.__ip[new_ip] = new_user",
"def register():\n\n data = collect_data()\n\n log.debug('data is: {0}'.format(json.dumps(data, default=lambda o: o.__dict__)))\n api_submit('/api/register', data, method='put')",
"def send(self, key, value):\n try:\n data = json.dumps({\"key\": key, \"value\": value})\n r = requests.post(url=self.endpoint, data=data)\n r.close()\n except:\n print(\"couldn't update peer\", self.endpoint)",
"async def async_update_registered_device_info(self):\n if self.api is not None:\n device_registry = self.hass.helpers.device_registry.async_get(self.hass)\n # Get or create also updates existing entries\n device_registry.async_get_or_create(\n config_entry_id=self.config_entry.entry_id,\n identifiers={(DOMAIN, self.api.device.unique_id)},\n name=self.api.device.name,\n manufacturer=MANUFACTURER_NAME,\n model=self.api.device.device_type,\n sw_version=self.api.device.firmware_version,\n # Uniqueid seems to be the mac. Adding the connection allows other integrations\n # like e.g. Mikrotik Router to link their entities to this device\n connections={(CONNECTION_NETWORK_MAC, self.api.device.unique_id)},\n )\n\n self.hass.config_entries.async_update_entry(\n self.config_entry,\n title=self.api.device.name,\n )\n\n return True",
"def register_with_existing_node():\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information 放入节点名单\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n peers.update(response.json()['peers'])\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n return response.content, response.status_code",
"def update(parameters, session):\n # Received 'parameters' --> [id_classification, name]\n classification_aux = session.query(Classification).filter(Classification.id == parameters[0]).first()\n classification_aux.name = parameters[1]\n session.commit()\n session.close()\n msg_rspt = Message(action=2, comment='Register updated successfully')\n return msg_rspt",
"def test_update_registration(self):\n body = Registrations()\n response = self.client.open(\n '/phuthien007/test/1.0.0/api/registrations',\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def update_profile(self):\n pass",
"def sendRegistration(self, host, port):\n message = RGSTMessage(sender=self.name, message_data=self._get_registration_data())\n self.connection.send_message(host, port, message.encode_message())\n self.logger.debug(\"Sent registration to {}:{}\".format(host, port))",
"async def register(self, ctx):\n manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message])\n\n if not isinstance(ctx.channel, discord.abc.PrivateChannel):\n await manager.say(\"Registration instructions have been messaged to you\")\n\n msg_sent = await manager.say(\"Registering your Destiny 2 account with me will allow \"\n + \"you to invoke commands that use information from your \"\n + \"public Destiny 2 profile.\", dm=True)\n if not msg_sent:\n return await manager.clear()\n\n platform = None\n while not platform:\n res = await manager.say_and_wait(\"Enter your platform (**xbox** or **playstation**):\", dm=True)\n if not res:\n return await manager.clear()\n platform = constants.PLATFORMS.get(res.content.upper())\n if not platform:\n await manager.say(\"Invalid platform. Try again.\", dm=True)\n\n act = await manager.say_and_wait(\"Enter your exact **account name**:\", dm=True)\n if not act:\n return await manager.clear()\n\n try:\n res = await self.destiny.api.search_destiny_player(platform, act.content)\n except ValueError as e:\n await manager.say(\"Invalid account name. If this seems wrong, please contact the developer.\")\n return await manager.clear()\n except pydest.PydestException as e:\n await manager.say(\"I can seem to connect to Bungie right now. Try again later.\")\n return await manager.clear()\n\n act_exists = False\n if res['ErrorCode'] == 1 and len(res['Response']) == 1:\n act_exists = True\n membership_id = res['Response'][0]['membershipId']\n elif res['ErrorCode'] == 1 and len(res['Response']) > 1:\n for entry in res['Response']:\n if act.content == entry['displayName']:\n act_exists = True\n membership_id = entry['membershipId']\n break\n\n if not act_exists:\n await manager.say(\"An account with that name doesn't seem to exist.\", dm=True)\n else:\n await manager.say(\"Account successfully registered!\", dm=True)\n self.bot.db.add_user(ctx.author.id)\n self.bot.db.update_registration(platform, membership_id, ctx.author.id)\n\n return await manager.clear()",
"def _update_info(self):",
"def update_broker_registry(self):\n self.get_broker_registry()\n self.app.send_all_web_socket_message(u\"brokersUpdated\")",
"def peers_save(self, peer_ip):\n self.peer_dict[peer_ip] = self.config.port"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
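
The `register_update` in the row above appends a fresh timestamp, records the registration date, stores the (possibly new) port, and refreshes the peer's lifetime. A minimal sketch of that bookkeeping; the `Peer` scaffold, the example port numbers, and the 7200-second TTL are assumptions for illustration:

import datetime
import time

TTL = 7200  # assumed registration lifetime, in seconds

class Peer:
    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port
        self.reg_times = [time.time()]
        self.reg_date = datetime.datetime.now()
        self.ttl = TTL
        self.flag = True  # True while the registration is considered active

    def update(self):
        # Refresh the lifetime whenever the peer re-registers or keeps alive.
        self.ttl = TTL
        self.flag = True

    def register_update(self, port):
        # A re-registration records a new timestamp and may change the port.
        self.reg_times.append(time.time())
        self.reg_date = datetime.datetime.now()
        self.port = port
        self.update()

p = Peer("10.0.0.5", 65423)
p.register_update(65424)
assert p.port == 65424 and p.flag and len(p.reg_times) == 2
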
Updates information of the peer per leave request. | def leave_update(self):
self.port = None
self.flag = False
self.ttl = 0 | [
"def on_peer_leave(self, node_info):\n msg = json.dumps({'type': 'user_leave', 'id': node_info['node_id'], 'name': node_info['name']})\n self.send_to_all_clients(msg)",
"def leave(self):\n for n in self.m_nbrs:\n pkt = Packet(self, n.m_peer, PACKET_LEAVE)\n self.send_pkt(pkt)\n self.m_online = False",
"def on_peer_disconnected(peer, peer_count):",
"async def memberleave(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"member_leave\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET member_leave = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for members leaving.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET member_leave = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for members leaving.\")\n await ctx.send(embed=embed)",
"def leave(self):\n try:\n self.user_man.announce_leave()\n self.vanilla_end()\n return 0\n except Exception as e:\n if self._debug:\n print(e)\n return 1",
"def par_disconnected(self, par: Participant):\n super(InfoServer, self).par_disconnected(par)\n full_par_id = par.meeting_id + par.client_id\n\n msgs_to_remove = []\n for msg in self.last_status_msgs.get(par.meeting_id, []):\n msg_name, msg_data = msg\n # if the client who left was in the middle of a sharing\n # inform all the participants that he has stopped sharing\n if full_par_id == msg_data:\n msgs_to_remove.append(msg)\n if msg_name in Info.OPPOSITE_MSGS:\n broadcast_msg = (Info.OPPOSITE_MSGS[msg_name], full_par_id)\n self.broadcast_info_msg(par, broadcast_msg)\n\n if msgs_to_remove:\n for msg in msgs_to_remove:\n self.last_status_msgs[par.meeting_id].remove(msg)\n\n # if there aren't any status messages,\n # delete the empty list in the dict\n if not self.last_status_msgs[par.meeting_id]:\n del self.last_status_msgs[par.meeting_id]\n\n # inform all the others participants that he left\n msg = (Info.CLIENT_LEFT, full_par_id)\n self.broadcast_info_msg(par, msg)\n\n # inform the main server that that client has disconnected\n self.client_disconnected_callback(full_par_id)",
"def on_leave(data):\n username = session[\"login\"][0]\n room = find_room(data[\"bookgroup_id\"], data.get(\"chapter_number\"))\n leave_room(room)\n\n emit('leave_status', {'msg': username + \" has left room \" + str(room)}, room=room)",
"def __handle_player_leaving_gracious(self,peerconn,data,peername):\n #-------------------------------------------------------------------------- \n players_game_id,peer_name,player_num= data.split(\" \") \n if self.play_start==False:\n if self.my_peer_name!=self.bootstrap:\n if players_game_id == self.game_id:\n try:\n self.playernum_hostip_dict.pop(player_num)\n self.leader_list.remove(player_num)\n if int(player_num) < int(self.player_num):\n self.player_num=str(int(self.player_num)-1)\n self.update_datastructures(player_num)\n self.sort_and_assign_leader()\n except KeyError:\n print \"Key not found\"\n elif self.play_start==True:\n if self.my_peer_name!=self.bootstrap:\n if players_game_id == self.game_id:\n try:\n del self.connect_pool[self.playernum_hostip_dict[player_num]]\n self.leader_list.remove(player_num)\n if self.player_num==self.leader_num:\n del self.update_pool[self.playernum_hostip_dict[player_num]]\n if player_num==self.leader_num:\n self.sort_and_assign_leader()\n self.playernum_hostip_dict.pop(player_num)\n \n except KeyError:\n print \"Key not found\"\n if self.enemy[player_num]:\n print self.enemy[player_num].alive\n self.enemy[player_num].alive=False\n self.enemy.pop(player_num)",
"async def leave(self, ctx):\n if not await self.check_pm(ctx.message):\n return\n if ctx.author not in self.players:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You are not in a game..\"))\n return\n if self.game_status == 0:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"No game in progress.\"))\n return\n elif self.game_status == 2:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You can not leave an in progress game.\"))\n return\n self.players.remove(ctx.author)\n await ctx.send(embed=self.make_embed(\"Avalon\", 0x77dd77, \"Avalon\", \"You have successfully left the game.\"))",
"def onLeave(self):\n pass",
"def post_leave_channel(self, leaver):\r\n pass",
"def leave(self, g):\n leave_vehicle = []\n for (v, leave_time) in self.vehicle:\n if leave_time == self.time:\n leave_vehicle.append((v, leave_time))\n for (v, leave_time) in leave_vehicle:\n self.vehicle.remove((v, leave_time))\n if v.mode != 'walk':\n logging.info(f'Time {self.time}: Vel {v.id} leave road ({self.ori},{self.dest})')\n g.graph_top[self.dest]['node'].vehicle_arrive(v)",
"async def leave(self, ctx):\n # [p]leave\n\n message = ctx.message\n\n await self.bot.say(\"Are you sure you want me to leave this server? \"\n \"Type yes to confirm.\")\n response = await self.bot.wait_for_message(author=message.author, timeout=30)\n\n if response is not None:\n if response.content.lower().strip() == \"yes\":\n await self.bot.say(\"Alright. Bye :wave:\")\n log.debug('Leaving \"{}\"'.format(message.server.name))\n await self.bot.leave_server(message.server)\n else:\n await self.bot.say(\"Ok I'll stay here then.\")",
"async def ask_leave(self, ctx):\r\n await ctx.send(\"If you would like to leave the game, type the command .leave\")",
"def on_leave_jap(self, data: dict):\n app.logger.info(\n \"Leave jap \"\n + str(data[\"jap_event_id\"])\n + \" received from \"\n + str(data[\"user_id\"])\n )\n\n room = self.__get_jap_event_room(data[\"jap_event_id\"])\n self.__remove_from_event(data[\"user_id\"], room)\n answer = {**data, \"members\": self.connected_by_jap_event[room]}\n\n if \"table_id\" in data:\n self.__remove_from_table(data[\"user_id\"], data[\"table_id\"])\n answer[\"table_members\"] = self.connected_at_table[data[\"table_id\"]]\n\n emit(socket_messages[\"USER_LEFT_JAP\"], answer, room=room)\n\n leave_room(room)\n\n if \"table_id\" in data:\n leave_room(self.__get_table_room(data[\"table_id\"]))",
"def old():\n leave = AllLeave()\n click.echo(leave.approve_old())",
"def IgmpMldLeave(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"igmpMldLeave\", payload=payload, response_object=None)",
"def _leave_cb(self):\n self.shared_activity.emit(\"joined\", False, \"left activity\")",
"def leave_height(self, leave_height):\n if leave_height is None:\n raise ValueError(\"Invalid value for `leave_height`, must not be `None`\") # noqa: E501\n\n self._leave_height = leave_height"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
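
A leaving peer keeps its record (so any cookie issued to it stays resolvable) but loses its port, active flag, and remaining lifetime. A short sketch of that state change; the `Peer` scaffold and example values are assumed:

class Peer:
    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port
        self.flag = True
        self.ttl = 7200

    def leave_update(self):
        # The record stays behind, but is marked inactive with nothing left to expire.
        self.port = None
        self.flag = False
        self.ttl = 0

p = Peer("10.0.0.5", 65423)
p.leave_update()
assert p.port is None and p.flag is False and p.ttl == 0
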
Tests whether the peer is still active and updates its TTL field. | def is_active(self):
if self.flag:
diff_time = time.time() - self.reg_times[-1]
self.ttl = TTL - diff_time
self.flag = True if self.ttl > 0 else False
if not self.flag:
self.ttl = 0 | [
"def _ping_keep_alive(self):\n self.client.active_ping(self.channel_name)\n return True",
"def is_expired(self):\n return self.ttl <= 0",
"def check_live_peers(self):\n\n to_delete = set()\n\n with self.peer_lock:\n for peer_id, peer in self.peers.items():\n if peer['alive']:\n peer['alive'] = False\n else:\n to_delete.add(peer_id)\n\n if len(self.peers) > 0.8 * self.max_peers:\n sorted_peers = self.get_sorted_peers()\n if sorted_peers is not None:\n for i in range(int(PERCENTAGE_OF_OLDEST_TO_REMOVE * len(self.peers))):\n peer = sorted_peers[i]\n peer_id = peer['host'] + ':' + str(peer['port'])\n to_delete.add(peer_id)\n\n for peer_id in to_delete:\n del self.peers[peer_id]\n\n m = Message(1, 'PING', [self.server_host, self.server_port])\n self.broadcast_message(m)",
"def is_live(self):\n return self.live_state is not None and self.live_state != ENDED",
"def keep_alive(self):\n self.send_tcp_msg('00')",
"def test_is_active_with_expired(self):\n self.assertTrue(self.instance.is_active)\n with self.settings(PASS_RESET_TOKEN_EXPIRATION_DELTA=timedelta(seconds=-1)):\n self.assertFalse(self.instance.is_active)",
"def check_alive(self) -> None:\n return",
"def hold_time_expired(flags: dict, current_time: datetime) -> bool:\n msg_time = datetime.strptime(flags['time'], DATE_FORMAT)\n hold_expiry = msg_time + timedelta(hours=MESSAGE_HOLD)\n return current_time >= hold_expiry",
"def __check_activated(self) -> None:\n WebcamMotionDetector.logger.debug('Checking if activated according to time settings...')\n d = datetime.now()\n r: bool = self.__config.get_activation_periods().is_in(d)\n if self.__activated != r:\n WebcamMotionDetector.logger.info(ACTIVATED_SPACE + str(r) + ', ' + SUSPENDED_SPACE + str(self.__suspended))\n self.__activated = r\n WebcamMotionDetector.logger.debug(ACTIVATED_SPACE + str(self.__activated) + ', ' + SUSPENDED_SPACE + str(self.__suspended))\n if self.__activated and not self.__suspended:\n self.__start_capture_task()\n self.__check_activated_task = threading.Timer(60, self.__check_activated)\n self.__check_activated_task.start()\n WebcamMotionDetector.logger.debug('Check activated scheduled...' + repr(self.__check_activated_task))",
"def remaining(self):\n if self._expired:\n raise Expired()\n\n obj = {\n u'ID': self.lease_id,\n }\n data = json.dumps(obj).encode('utf8')\n\n url = u'{}/v3alpha/kv/lease/timetolive'.format(self._client._url).encode()\n response = yield treq.post(url, data, headers=self._client._REQ_HEADERS)\n\n obj = yield treq.json_content(response)\n\n ttl = obj.get(u'TTL', None)\n if not ttl:\n self._expired = True\n raise Expired()\n\n # grantedTTL = int(obj[u'grantedTTL'])\n # header = Header._parse(obj[u'header']) if u'header' in obj else None\n\n returnValue(ttl)",
"def _internal_update(self) -> None:\n if self._status == 'paused':\n self.update_time = None\n return\n if self._status == 'done' or (self._seconds_since_check > self._remain\n and self._status == 'active'):\n self._status = 'done'\n self.update_time = None\n self._remain = 0\n if self._status == 'active':\n self._remain = self._remain - self._seconds_since_check\n self.update_time = int(time.time())",
"def is_active(self, secs):\n active = self.last_seen + datetime.timedelta(seconds=secs) >= datetime.datetime.now()\n return active",
"def test_keep_lease_alive_beyond_expiration():\n with throttle(b\"[semaphores]\\nA=1\") as url:\n client = Client(url)\n peer = PeerWithHeartbeat(\n client=client,\n heartbeat_interval=timedelta(seconds=1),\n expiration_time=timedelta(seconds=4),\n )\n set_local_peer(url, peer)\n with lock(url, \"A\") as _:\n sleep(5)\n # Evens though enough time has passed, our lease should not be\n # expired, thanks to the heartbeat.\n assert client.remove_expired() == 0",
"def is_alive(self):\n return False",
"def isReplicaAlive(self):\n try:\n res = requests.get(self.getUrl() + self.path, timeout=self.serverTimeout)\n if res.status_code == 200:\n self.alive = True\n else:\n self.alive = False\n except Exception as E:\n self.alive = False\n return self.alive",
"def test_active_interval(self):\n c1 = ('192.168.0.10', 'testrack1')\n port = 10004\n filename = '%s/test.torrent' % SCRIPTDIR\n log.debug('using test torrent file: %s' % filename)\n ihash = testutils.get_ihash_from_filename(filename)\n log.debug('ihash: %s' % ihash)\n filesz = testutils.get_size_from_filename(filename)\n resp = testutils.post_torrent(self.client, filename)\n log.debug('resp.data on post: %s' % resp.data)\n resp = testutils.add_client(self.app, ihash=ihash, ipaddress=c1[0], rackname=c1[1], event=None, mock_smdb=True, port=port, left=filesz)\n self.assert200(resp)\n print bencode.bdecode(resp.data)\n self.assertEqual(bencode.bdecode(resp.data)['interval'], self.app.config['ACTIVE_INTERVAL'])",
"def checkPing():\n global _pingCounter\n if _nextPingAt <= datetime.datetime.now():\n if _lastReceived != _pingCounter:\n logging.error(\"ping didn't arrive in time, resetting connection\")\n if on_failure:\n on_failure()\n IOT._mqttClient.reconnect()\n return False\n else:\n _pingCounter += 1\n ping()\n\n return True",
"def test_is_alive_true_on_not_terminated(self):\n session = mock.Mock()\n session._terminated = False\n session.is_alive = Sl4aSession.is_alive\n self.assertNotEqual(session._terminated, session.is_alive)",
"def is_alive(self, is_alive):\n\n self._is_alive = is_alive"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
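
The activity check above decays the TTL by the time elapsed since the last registration and clears the active flag once nothing remains. A sketch of that logic; the 7200-second TTL is an assumed value:

import time

TTL = 7200  # assumed registration lifetime, in seconds

class Peer:
    def __init__(self):
        self.reg_times = [time.time()]
        self.flag = True
        self.ttl = TTL

    def is_active(self):
        if self.flag:
            # Remaining lifetime is the full TTL minus time since the last registration.
            elapsed = time.time() - self.reg_times[-1]
            self.ttl = TTL - elapsed
            self.flag = self.ttl > 0
        if not self.flag:
            self.ttl = 0

p = Peer()
p.reg_times[-1] -= TTL + 1  # pretend the last registration is long stale
p.is_active()
assert p.flag is False and p.ttl == 0
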
Runs the new thread to execute the request and send the response back. Continuously waits for connections from peers on the well-known port 654231. Once a request is obtained from a peer, it calls helper functions to extract the request information and construct the response message. Sends the response message back to the peer. | def run(self):
# Loop forever waiting for new connections from different peers
while True:
# Wait on accept and create new socket
try:
connection_socket, address = server_socket.accept()
except error:
print 'Shuts down the TCP Register Server welcoming socket...'
exit()
# Read peer's request data from socket
message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)
request_data = message_chunk
while len(message_chunk) == MAX_BUFFER_SIZE:
message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)
request_data += message_chunk
print '\n', request_data.decode()
try:
assert PROTOCOL_EOP in request_data.decode(), \
'Exception: Undefined App Layer Protocol..'
# Obtain response message by extracting request protocol
response_message = extract_data_protocol(request_data.decode())
connection_socket.send(response_message.encode())
except AssertionError, _e:
print _e
connection_socket.close()
del connection_socket | [
"def execute_request(method, host, port, cookie):\n try:\n if method == 'REGISTER':\n if cookie is None:\n # The peer has never register before.\n assert len(dict_peers) not in dict_peers, \\\n 'Error: Cookie for the new peer is in use.'\n # Add the peer to the list and assign cookie to it.\n peer = Peer(host, port, cookie=len(dict_peers), flag=True)\n dict_peers[len(dict_peers)] = peer\n # Call helper function to prepare response message.\n response_message = encapsulate_data_protocol(\n 201, 'Created', cookie=peer.cookie)\n else:\n # Peer has registered previously.\n peer = dict_peers.get(cookie)\n peer.register_update(port)\n # Call helper function to prepare response message.\n response_message = encapsulate_data_protocol(200, 'OK',\n cookie=peer.cookie)\n elif method == 'LEAVE':\n peer = dict_peers.get(cookie)\n # Update peer's information per leave request.\n peer.leave_update()\n # Call helper function to prepare response message.\n response_message = encapsulate_data_protocol(200, 'OK')\n return response_message\n elif method == 'PQUERY':\n if cookie is None:\n # Not legal since peer is not registered.\n response_message = encapsulate_data_protocol(\n 403, 'Forbidden [Peer is NOT register with the RS]')\n else:\n peer = dict_peers.get(cookie)\n peer.is_active()\n if not peer.flag:\n # TTL of the peer is expired.\n response_message = encapsulate_data_protocol(\n 403, 'Forbidden [Peer is NOT register with the RS]')\n else:\n # Get all active peer information ready to send to peer.\n list_active_peers = []\n for key, active_peer in dict_peers.iteritems():\n if active_peer.flag and cookie != active_peer.cookie:\n dict_active_peer = dict([(active_peer.hostname,\n active_peer.port)])\n list_active_peers.append(dict_active_peer)\n if list_active_peers:\n # Call helper function to prepare response message.\n response_message = encapsulate_data_protocol(\n 302, 'Found', list_active_peers=list_active_peers)\n else:\n # No active peers found.\n response_message = encapsulate_data_protocol(\n 404, 'Not Found [No other active peers in the '\n 'P2P-DI system found]')\n elif method == 'KEEPALIVE':\n peer = dict_peers.get(cookie)\n peer.update()\n # Call helper function to prepare response message.\n response_message = encapsulate_data_protocol(200, 'OK',\n cookie=peer.cookie)\n else:\n # Not supported request method.\n # Call helper function to prepare response message.\n response_message = encapsulate_data_protocol(400, 'Bad Request')\n return response_message\n except Exception as _e:\n print _e.__doc__\n print type(_e).__name__\n print _e.message\n response_message = encapsulate_data_protocol(\n 404, 'Not Found [Peer is NOT register with the RS]')\n return response_message",
"def handle_connect(self, req):\r\n \r\n # Create a socket to connect to the remote server\r\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # From now on, we must not forget to close this socket before leaving.\r\n try:\r\n try:\r\n # Connection to the remote server\r\n print thread.get_ident(), 'Connecting to', req['address']\r\n\r\n\r\n # Possible way to handle the timeout defined in the protocol!\r\n # Make the connect non-blocking, then do a select and keep\r\n # an eye on the writable socket, just as I did with the\r\n # accept() from BIND requests.\r\n # Do this tomorrow... Geez... 00:47... Do this this evening.\r\n \r\n remote.connect(req['address'])\r\n \r\n # The only connection that can be reset here is the one of the\r\n # client, so we don't need to answer. Any other socket\r\n # exception forces us to try to answer to the client.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Client_Connection_Closed((ERR_CONNECTION_RESET_BY_PEER, socket.errorTab[ERR_CONNECTION_RESET_BY_PEER]))\r\n else:\r\n raise Remote_Connection_Failed\r\n except:\r\n raise Remote_Connection_Failed\r\n \r\n # From now on we will already have answered to the client.\r\n # Any exception occuring now must make us exit silently.\r\n try:\r\n # Telling the client that the connection it asked for is\r\n # granted.\r\n self.answer_granted()\r\n # Starting to relay information between the two peers.\r\n self.forward(self.request, remote)\r\n # We don't have the right to \"speak\" to the client anymore.\r\n # So any socket failure means a \"connection closed\" and silent\r\n # exit.\r\n except socket.error:\r\n raise Connection_Closed\r\n # Mandatory closing of the remote socket.\r\n finally:\r\n remote.close()",
"def run(self):\n response_socket = self._ZMQ_CONTEXT.socket(zmq.DEALER)\n if self._SOCKET_IDENTITY:\n response_socket.setsockopt(zmq.IDENTITY, self._SOCKET_IDENTITY)\n response_socket.connect(self._ZMQ_ENDPOINT)\n poller = zmq.Poller()\n # pylint: disable=E1101\n poller.register(response_socket, zmq.POLLIN)\n # pylint: enable=E1101\n self._is_running.set()\n while self._is_running.is_set():\n socks = dict(poller.poll(self._POLLING_TIMEOUT_MILLI))\n if socks.get(response_socket) == zmq.POLLIN:\n message = response_socket.recv()\n self._handle_binary_omega_message(message)\n time.sleep(2.)\n response_socket.close()",
"def main():\r\n # Check input data\r\n request_type, ip_address, port_num = input_check(sys.argv)\r\n\r\n # Create socket\r\n try:\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n except:\r\n raise Exception(\"Unable to create a client socket!\")\r\n\r\n # Make request packet according to dateOrTimeCode entered (date or time)\r\n request_packet = make_request_packet(request_type)\r\n\r\n # Send data to socket\r\n full_address = (ip_address, port_num)\r\n try:\r\n client_socket.sendto(request_packet, full_address)\r\n except:\r\n client_socket.close()\r\n raise Exception(\"Could not send request packet!\")\r\n\r\n # Get response in one second\r\n timeout = 5\r\n read_sockets, write_sockets, error = select.select([client_socket], [], [], timeout)\r\n\r\n # Get contents from socket\r\n if len(read_sockets) != 0:\r\n packet_header_len = 13\r\n received_message, address = client_socket.recvfrom(BUFFER_SIZE)\r\n checked_packet_contents = packet_checker.response_packet_check(received_message)\r\n text = received_message[packet_header_len:].decode('utf-8')\r\n if checked_packet_contents is not None:\r\n pretty_printer(checked_packet_contents, text)\r\n else:\r\n client_socket.close()\r\n raise Exception(\"Packet contents are not well formed!\")\r\n\r\n else:\r\n # No response in one second\r\n client_socket.close()\r\n raise Exception(\"Connection timed out: did not receive response packet in {} second(s)!\".format(timeout))",
"def run(self):\n thread = threading.Thread(target=self.worker, args=(0,))\n logger.info(\"Tunnel connection %r starting\", self.ident)\n try:\n thread.start()\n except Exception:\n self.abort()\n raise\n self.worker(1)",
"def execute(self, request, timeout = 1):\n\n\t\tif not isinstance(request, basestring):\n\t\t\traise TypeError('request must be a string')\n\n\t\tif not isinstance(timeout, int):\n\t\t\traise TypeError('timeout must be an int')\n\n\t\t# Create the socket and connect to it\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t\ttry:\n\t\t\tsock.connect(self._address)\n\t\t\tsock.settimeout(timeout)\n\n\t\t\t# Send the request\n\t\t\tsock.sendall(request)\n\n\t\t\t# Receive a response\n\t\t\tresponse = sock.recv(2048)\n\t\tfinally:\n\t\t\tsock.close()\n\n\t\t# Return None if there is no response\n\t\tif response is None or len(response) == 0:\n\t\t\treturn None\n\n\t\treturn response",
"def request(self, data, ip):\n port = 8000\n s = None\n for res in socket.getaddrinfo(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n try:\n s = socket.socket(af, socktype, proto)\n except socket.error, msg:\n s = None\n continue\n try:\n s.connect(sa) # connect to remote host, here we only connect to agent\n s.setblocking(0) # we use non-blocking socket\n except socket.error, msg:\n s.close()\n s = None\n continue\n break\n if s is None:\n print 'could not open socket'\n sys.exit(1)\n\n s.sendall(data) # send data to agent\n\n # put socket into listening list which is waiting for response\n self.inputs.append(s)\n self.message_queue[s] = Queue.Queue()\n gevent.sleep(0.1) # switch to listening handler\n\n # getting response and close socket would be done by listening handler\n return",
"def main():\n check_existence_db()\n logging.basicConfig(filename=\"server.log\", encoding='utf-8', format='%(asctime)s - %(levelname)s - %(message)s')\n\n port = option_reading()\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind(('', port))\n\n print(\n f\"Server turned on with address: {host_address} and the port: {port}. STATUS: Ready to interact\")\n\n logging.warning('SERVER STARTED ON IP: ' + host_address + ', ' + 'IN THE PORT: ' + str(port))\n\n while True:\n server_socket.listen()\n\n client_socket, client_address = server_socket.accept()\n print(f'\\nGot a connection from: {client_address}')\n logging.warning('ACCEPTED CONNECTION OF THE IP ADDRESS: ' + client_address[0])\n\n multithreading = threading.Thread(target=protocol_tcp, args=(client_socket, client_address))\n multithreading.start()",
"def wait_on_request(self):\n \n self.sock1.listen(5)\n self.sock2.listen(5)\n self.sock3.listen(5)\n self.sock1.setblocking(1)\n self.sock2.setblocking(1)\n self.sock3.setblocking(1)\n inputs = [self.sock1,self.sock2,self.sock3]\n incoming, a, b = select.select(inputs, [], inputs)\n if incoming[0]:\n recev_ad = (incoming[0].getsockname());\n self.connection,address = incoming[0].accept()\n data_in = self.connection.recv(64); \n if self.verify_dt_request(data_in) == True:\n self.lang_type = self.portarray.index(recev_ad[1])+1\n print('Received a request for #{} language and type {}'.format(self.lang_type,self.req_type))\n self.response()\n self.wait_on_request()",
"def start(self):\n\n # Create the bind socket\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(\"tcp://{}:{}\".format(self.address, self.port))\n\n call_count = 0\n\n # Wait for requests and process them\n while self.call_count is None or call_count < self.call_count:\n try:\n request = socket.recv()\n except KeyboardInterrupt:\n # Die gracefully\n socket.setsockopt(zmq.LINGER, 0)\n socket.close()\n return\n\n response = self._handle_request(request)\n\n socket.send(response)\n\n call_count += 1\n\n socket.close()",
"def wait_for_response(self, target_id):\n try:\n out = ''\n while self.is_connected or self.connect():\n try:\n msg = self.sock.recv(1024)\n if self.debug:\n print (msg) # pragma: no cover\n except socket.timeout: # pragma: no cover\n print (\"socket timed out\") # pragma: no cover\n self.is_connected = False # pragma: no cover\n continue # pragma: no cover\n except socket.error: # pragma: no cover\n traceback.print_exc(file=sys.stdout) # pragma: no cover\n raise # pragma: no cover\n\n out += msg\n if msg == '':\n self.is_connected = False # pragma: no cover\n\n # get the list of messages by splitting on newline\n raw_messages = out.split(\"\\n\")\n\n # the last one isn't complete\n out = raw_messages.pop()\n for raw_message in raw_messages:\n message = json.loads(raw_message)\n\n id = message.get('id')\n error = message.get('error')\n result = message.get('result')\n\n if id == target_id:\n if error:\n raise Exception( # pragma: no cover\n \"received error %s\" % message) # pragma: no cover\n else:\n return result\n else:\n # just print it for now\n print (message) # pragma: no cover\n except: # pragma: no cover\n traceback.print_exc(file=sys.stdout) # pragma: no cover\n\n self.is_connected = False # pragma: no cover",
"def handle_request(self):\n try:\n request, client_address = self.get_request()\n except socket.error:\n return\n if self.verify_request(request, client_address):\n try:\n self.process_request(request, client_address)\n except SocketConnected, err:\n self._serve_process(err.slaveFd, err.serverPid)\n return\n except:\n self.handle_error(request, client_address)\n self.close_request(request)",
"def worker(self):\n while True:\n address, msg = self.messenger.receive(return_payload=False)\n if msg is None:\n time.sleep(2)\n continue\n deserialized_msg = self.messenger.deserialize_message_payload(msg)\n if msg.msg_type == message.Message.MSG_TASKUNIT:\n tu = deserialized_msg\n # TODO MA Make this run in a new thread instead of directly here.\n tu.run()\n self.messenger.send_taskunit_result(tu, address)",
"async def start_sending(self):\n\n while self.is_connected:\n await asyncio.sleep(Peer.REQUEST_DELAY_NO_BLOCK)\n # print(\"Sending?\")\n if not self.peer_choking:\n # make block requests\n request_message = self.client.piece_manager.get_next_request(self)\n if request_message is not None:\n await self.send(request_message)",
"def tcpListenerThread(self):\n\t\tself.chatServer.runServer(self.message_queue)",
"def execute(self):\n\n # disconnect from previus socket\n if self.port != None:\n self.socket.disconnect('ipc://127.0.0.1:{}'.format(self.port))\n\n # select free port\n tmpSocket = self.zmqctx.socket(zmq.REP)\n self.port = tmpSocket.bind_to_random_port('ipc://127.0.0.1', self.minPort, self.maxPort, self.maxRetries)\n tmpSocket.unbind('ipc://127.0.0.1:{}'.format(self.port))\n\n # open files to log\n os.makedirs(self.logPath, exist_ok = True)\n self.logStdout = open(os.path.join(self.logPath, self.startUpTime + '-' + self.name + '.out'), 'w')\n self.logStderr = open(os.path.join(self.logPath, self.startUpTime + '-' + self.name + '.err'), 'w')\n\n # run the process\n self.process = subprocess.Popen([self.path, str(self.port)], stdout=self.logStdout, stderr=self.logStderr)\n\n # create client\n self.socket.connect('ipc://127.0.0.1:{}'.format(self.port))\n\n # send config to the process\n self.sendReply({});",
"def request_and_process_task(self):\n if not interrupted:\n self.requests.send(b\"requesting task\") # TODO: if interrupted (SIGUSR1), we could send a different message\n while not interrupted and datetime.datetime.now() < self.timeout:\n # print(\"xxx\") # TODO: use self.replies to send heartbeat\n socks = dict(self.poller.poll(COOKIE_TEST_INTERVAL))\n self.check_broker() # test for broker change\n if socks.get(self.requests) == zmq.POLLIN:\n task = self.requests.recv_multipart()\n if not task:\n print(\"WORKER: got notask ???\")\n break\n else:\n answer = task[:1] + self.process_multipart(task[1:])\n self.replies.send_multipart(answer)\n break",
"def run_test_ok():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n cseq = 1\n session = None\n\n try:\n s.connect((host,port))\n except socket.error, (value,message):\n if s:\n s.close()\n print \"Could not open socket: \" + message\n sys.exit(-1)\n\n for y in range(msg_num):\n s.send(msg[y].format(cseq,session))\n cseq = cseq + 1\n data = s.recv(buffSize)\n\n if y == 0: print \"\"\n print \"Sending:\", msg_sem[y]\n print '\\n', data[:len(data)-3]\n\n if not session:\n session = parse_session_id(data)\n if session:\n print \"\\n>>> Parsed session ID:\", session\n\n print \"*\"*80\n if y == msg_num - 1: print \"\"\n\n s.close()",
"def listen(self, request_dict):\n while True:\n gevent.sleep(1) # switch to request co-routine\n # use select to get readable event\n readable, writable, exceptional = select.select(self.inputs, self.outputs, self.inputs, 0)\n\n if not (readable or writable or exceptional):\n print 'No input event'\n continue\n\n # polling readable event\n for s in readable:\n buf = s.recv(9999)\n data = buf\n while len(buf): # read until there's no data\n buf = s.recv(9999)\n data += buf\n if data:\n # print 'recv data:', data, 'from', s.getpeername()\n self.message_queue[s].put(data) # put data into message_queue\n self.inputs.remove(s) # remove socket because only wait for one response\n s.close()\n else:\n # no data received\n print 'close the connection', s.getpeername()\n if s in self.outputs:\n self.outputs.remove(s)\n self.inputs.remove(s)\n s.close()\n del self.message_queue[s]\n\n # exceptional event\n for s in exceptional:\n print \"exceptional connection:\", s.getpeername()\n self.inputs.remove(s)\n if s in self.outputs:\n self.outputs.remove(s)\n s.close()\n del self.message_queue[s]\n\n # check if all requests have been answered\n if DcaProtocol.check_termination(self.message_queue, request_dict):\n print 'All requests have been answered'\n return request_dict\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the request from the protocol message received from the peer. Parses the request out of the P2PDI/1.0 protocol and calls a helper function to prepare the response message sent back to the peer. | def extract_data_protocol(request_data):
data_list = request_data.split()
method = data_list[1]
version = data_list[2]
try:
assert version == PROTOCOL, 'Exception: Undefined App Layer Protocol...'
except AssertionError, _e:
print _e
response_message = encapsulate_data_protocol(417,
'Expectation Failed')
return response_message
host = data_list[data_list.index('Host:') + 1]
port = data_list[data_list.index('Port:') + 1]
cookie = None if data_list[data_list.index('Cookie:') + 1] == 'None' \
else int(data_list[data_list.index('Cookie:') + 1])
# Call helper function to prepare response message.
response_message = execute_request(method, host, port, cookie)
return response_message | [
"def unpack(breq_original):\n\t\tbreq = bytearray(breq_original)\n\n\t\t# Extract request ID and length.\n\t\tr_id, r_len = struct.unpack(\"<BI\", breq[:5])\n\t\tbreq = breq[5:(5+r_len)]\n\t\t# Create a dict of parameters.\n\t\td = {\"id\":r_id}\n\n\t\t# Join\n\t\tif r_id == Protocol.REQ_JOIN:\n\t\t\t# Extract nickname\n\t\t\tbnlen, = struct.unpack(\"<I\", breq[:4])\n\t\t\tbreq = breq[4:]\n\t\t\tbname, = struct.unpack(\"<{}s\".format(bnlen), breq[:bnlen])\n\t\t\td[\"name\"] = bname.decode(\"utf-8\")\n\t\t\t# Extract document name\n\t\t\td[\"doc\"] = breq[bnlen:].decode(\"utf-8\")\n\t\t# Or leave?\n\t\telif r_id == Protocol.REQ_LEAVE:\n\t\t\t# No arguments here.\n\t\t\tpass\n\t\t# A full text request?\n\t\telif r_id == Protocol.REQ_TEXT:\n\t\t\t# No arguments\n\t\t\tpass\n\t\telif r_id == Protocol.RES_TEXT:\n\t\t\t# Extract version, cursor\n\t\t\tversion, cursor, = struct.unpack(\"<II\", breq[:8])\n\t\t\td[\"version\"] = version\n\t\t\td[\"cursor\"] = cursor\n\t\t\t# Extract text\n\t\t\td[\"text\"] = breq[8:].decode(\"utf-8\")\n\t\t# Commit?\n\t\telif r_id == Protocol.RES_COMMIT:\n\t\t\t# Extract version\n\t\t\tversion, = struct.unpack(\"<I\", breq[:4])\n\t\t\td[\"version\"] = version\n\t\t\td[\"sequence\"] = []\n\t\t\t# Extract operations\n\t\t\tbreq = breq[4:]\n\t\t\twhile len(breq) > 0:\n\t\t\t\tbreq, dop = Protocol.unpack_op(breq)\n\t\t\t\td[\"sequence\"].append(dop)\n\t\t# Ok response\n\t\telif r_id == Protocol.RES_OK:\n\t\t\treq, = struct.unpack(\"<B\", breq[:1])\n\t\t\td[\"req_id\"] = req\n\t\t# Error response\n\t\telif r_id == Protocol.RES_ERROR:\n\t\t\terror, = struct.unpack(\"<I\", breq[:4])\n\t\t\td[\"error\"] = error\n\t\treturn d",
"def _parse_request(self) -> None:\n action_and_name_from_request = self._request.split(PROTOCOL)\n self._action_from_request = action_and_name_from_request[0].split()[0]\n self._name_from_request = ' '.join(action_and_name_from_request[0].split()[1:])\n self._phone_from_request = self._request.split('\\r\\n')[1]",
"def decode(self):\n\n buf_len = len(self.buffer)\n if buf_len < 4:\n return None\n \n decoded = None\n msg_len = struct.unpack('>I', self.buffer[0:4])[0]\n \n if msg_len == 0:\n decoded = KeepAlive()\n else:\n if buf_len < 4 + msg_len:\n return None\n data = self.buffer[:4+msg_len]\n msg_id = struct.unpack('>b', data[4:5])[0]\n \n decoded = None\n if msg_id == Choke.ID:\n decoded = Choke()\n elif msg_id == Unchoke.ID:\n decoded = Unchoke()\n elif msg_id == Interested.ID:\n decoded = Interested()\n elif msg_id == NotInterested.ID:\n decoded = NotInterested()\n elif msg_id == Have.ID:\n decoded = Have.decode(data)\n elif msg_id == BitField.ID:\n decoded = BitField.decode(data)\n elif msg_id == Request.ID:\n decoded = Request.decode(data)\n elif msg_id == Piece.ID:\n decoded = Piece.decode(data)\n elif msg_id == Cancel.ID:\n decoded = Cancel.decode(data)\n \n self.buffer = self.buffer[4+msg_len:]\n return decoded",
"def _dump_request(self, hpack_decoder):\n assert(self.stream_done())\n\n # get request header.\n header_list = hpack_decoder.decode(self._server_recv_header)\n req_header = dict(util.header_base64_decode(header_list))\n message_encoding = req_header.get(GRPC_MESSAGE_ENCODING, None)\n\n # get request data info.\n req_data = None\n if self._server_recv_data:\n req_data = proto_util.parse_rpc_data_frame(\n self._server_recv_data,\n req_header[':path'],\n myconfig.PARAMETER_REQUEST,\n message_encoding\n )\n self.__req_path = req_header[':path']\n\n print('request header: ')\n print(textwrap.indent(pprint.pformat(req_header), ' '*4))\n print('request data(LPS parse in protobuf): ')\n for lps in req_data:\n print(textwrap.indent(str(lps.__class__), ' '*4))\n print(textwrap.indent(str(lps), ' '*8))",
"def decode(self):\n\n self.src_port = int(data_to_hex_str(self.message[0:2]), 16)\n self.dst_port = int(data_to_hex_str(self.message[2:4]), 16)\n self.sequence_num = int(data_to_hex_str(self.message[4:8]), 16)\n self.ack_num = int(data_to_hex_str(self.message[8:12]), 16)\n self.data_offset = int(data_to_hex_str(self.message[12])[0:3], 16) * 4\n\n #parse the flags: bit operation\n flags = ord(self.message[13])\n if ((flags & (1 << 5)) != 0):\n self.flag_urg = 1\n else:\n self.flag_urg = 0\n\n if ((flags & (1 << 4)) != 0):\n self.flag_ack = 1\n else:\n self.flag_ack = 0\n\n if ((flags & (1 << 3)) != 0):\n self.flag_psh = 1\n else:\n self.flag_psh = 0\n\n if ((flags & (1 << 2)) != 0):\n self.flag_rst = 1\n else:\n self.flag_rst = 0\n\n if ((flags & (1 << 1)) != 0):\n self.flag_syn = 1\n else:\n self.flag_syn = 0\n\n if ((flags & 1) != 0):\n self.flag_fin = 1\n else:\n self.flag_fin = 0\n\n self.window_size = int(data_to_hex_str(self.message[14 : 16]), 16)\n self.checksum = data_to_hex_str(self.message[16 : 18])\n self.urgent_pointer = data_to_hex_str(self.message[18 : 20])\n\n header_len = self.data_offset\n if (header_len > 20):\n self.opt_paddings = data_to_hex_str(self.message[20 : header_len])",
"def parse_request(self, request_body: str) -> RequestData:\n try:\n params, method = xmlrpc_client.loads(\n request_body, use_builtin_types=self.use_builtin_types\n )\n\n except ExpatError as exc:\n raise RPCParseError(f\"Error while parsing XML-RPC request: {exc}\") from exc\n\n except Exception as exc:\n raise RPCInvalidRequest(\"The request appear to be invalid.\") from exc\n\n else:\n if not method:\n raise RPCInvalidRequest(\n \"Missing methodName. Please provide the name of the procedure you want to call\"\n )\n return params, method",
"def translate_response(self, s=None, force_reply=False):\n \n if not s:\n if not self.s:\n print \"Error translate_response: Connection required.\"\n return None\n else:\n s = self.s\n \n result = {}\n head = s.recv(4)\n if not head:\n print \"Error translate_response: Connection closed.\"\n s.close()\n \n return None\n elif head == \"\\xFF\\xFF\\xFF\\xFF\":\n body = s.recv(18)\n \n result[\"packet_id\"] = head\n result[\"status_code\"] = body[0:2]\n result[\"command\"] = body[2:13].replace(\"\\x00\", \"\")\n result[\"body_type\"] = body[13:14]\n result[\"body_length\"] = struct.unpack(\"I\", body[14:18])[0]\n result[\"body\"] = decode_all(s.recv(result[\"body_length\"]))[0]\n \n return result\n else:\n encrypted_body_length = struct.unpack(\"I\", head)[0]\n \n encrypted_body = \"\"\n recv_encrypted_body_length = 0\n while recv_encrypted_body_length < encrypted_body_length:\n new = s.recv(encrypted_body_length - recv_encrypted_body_length)\n encrypted_body += new\n recv_encrypted_body_length += len(new)\n total_body = self.dec_aes(encrypted_body)\n \n total_body_length = struct.unpack(\"I\", total_body[18:22])[0]\n recv_total_body_length = len(total_body[22:])\n while recv_total_body_length < total_body_length:\n encrypted_body_length = struct.unpack(\"I\", s.recv(4))[0]\n \n encrypted_body = \"\"\n recv_encrypted_body_length = 0\n while recv_encrypted_body_length < encrypted_body_length:\n new = s.recv(encrypted_body_length - recv_encrypted_body_length)\n encrypted_body += new\n recv_encrypted_body_length += len(new)\n \n body = self.dec_aes(encrypted_body)\n total_body += body\n recv_total_body_length += len(body)\n \n result[\"packet_id\"] = total_body[0:4]\n result[\"status_code\"] = total_body[4:6]\n result[\"command\"] = total_body[6:17].replace(\"\\x00\", \"\")\n result[\"body_type\"] = total_body[17:18]\n result[\"body_length\"] = struct.unpack(\"I\", total_body[18:22])[0]\n \n result[\"body\"] = decode_all(total_body[22:])[0]\n \n if result[\"packet_id\"] != \"\\xFF\\xFF\\xFF\\xFF\" and force_reply:\n self.handle_packet(result)\n \n return self.translate_response(s, force_reply)\n else:\n return result",
"def _recv(self, expected_length=-1):\n \n response = \"\"\n\n##################### Modified by yaoming.lin on 2013-07-09 ####################\n\n is_ok = True\n\n #read the 5 bytes of the pdu message\n while (len(response) < 5) and is_ok: \n new_byte = self._sock.recv(1)\n if len(new_byte) == 0:\n is_ok = False\n else:\n response += new_byte\n if is_ok:\n #read the rest of the request\n #length = self._get_request_length(request)\n if ord(response[1]) < 7: # Modified by yaoming.lin on 2015-08-17\n length = ord(response[2]) + 5\n elif ord(response[1]) < 17:\n length = 8\n else:\n length = 5\n \n while (len(response) < length) and is_ok:\n new_byte = self._sock.recv(1)\n if len(new_byte) == 0:\n is_ok = False\n else:\n response += new_byte\n\n################################################################################\n\n retval = call_hooks(\"modbus_rtu_over_tcp.RtuOverTcpMaster.after_recv\", (self, response))\n if retval <> None:\n return response\n return response",
"def formRequestPacket(request):\r\n magicNumber = 0x497E\r\n packetType = 0x0001\r\n #Assign the appropriate request type\r\n #Checks already conducted in input phase\r\n if request == \"date\":\r\n requestType = 0x0001\r\n elif request == \"time\":\r\n requestType = 0x0002\r\n \r\n #Create and fill out the bytearray\r\n requestPacket = bytearray(6)\r\n requestPacket[0:2] = magicNumber.to_bytes(2, byteorder=\"big\")\r\n requestPacket[2:4] = packetType.to_bytes(2, byteorder=\"big\")\r\n requestPacket[4:6] = requestType.to_bytes(2, byteorder=\"big\")\r\n return requestPacket",
"def parse_request(self):\n self.method, self.location, self.http_version = \\\n self.request_line.decode(\"utf-8\").split()",
"def _ParseProtoPayloadRequest(\n self,\n request: Dict[str, Any],\n timesketch_record: Dict[str, Any]) -> None:\n request_attributes = [\n 'name', 'description', 'direction', 'member', 'targetTags', 'email',\n 'account_id'\n ]\n for attribute in request_attributes:\n if attribute in request:\n timesketch_attribute = 'request_{0:s}'.format(attribute)\n timesketch_record[timesketch_attribute] = request[attribute]\n\n # Firewall specific attributes.\n if 'sourceRanges' in request:\n source_ranges = ', '.join(request['sourceRanges'])\n timesketch_record['source_ranges'] = source_ranges\n\n if 'alloweds' in request:\n for allowed in request['alloweds']:\n attribute_name = 'allowed_{0:s}_ports'.format(allowed['IPProtocol'])\n if 'ports' in allowed:\n timesketch_record[attribute_name] = allowed['ports']\n else:\n timesketch_record[attribute_name] = 'all'\n\n if 'denieds' in request:\n for denied in request['denieds']:\n attribute_name = 'denied_{0:s}_ports'.format(denied['IPProtocol'])\n if 'ports' in denied:\n timesketch_record[attribute_name] = denied['ports']\n else:\n timesketch_record[attribute_name] = 'all'\n\n # Service account specific attributes\n if 'service_account' in request:\n service_account_name = request['service_account'].get('display_name')\n timesketch_record['service_account_display_name'] = service_account_name",
"def _read_para_relay_from(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument\n if clen != 20:\n raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')\n\n _port = self._read_unpack(2)\n _ptcl = self._read_unpack(1)\n _resv = self._read_fileng(1)\n _addr = self._read_fileng(16)\n\n relay_from = dict(\n type=desc,\n critical=cbit,\n length=clen,\n port=_port,\n protocol=TP_PROTO.get(_ptcl),\n ip=ipaddress.ip_address(_addr),\n )\n\n return relay_from",
"def _process_video_request(self, data):\n #print(\"DATA \"+str(data))\n forward_data = self._pack_forward_request(data)\n self.connection[data['payload']['idServer']].send(forward_data, 'forwardchunk')",
"def _unpack_command(self, given_buffer):\n assert self._is_client_side is not None, \"Ambiguous connection state\"\n\n if not given_buffer:\n cmd_type = None\n cmd_args = None\n cmd_len = 0\n elif struct.pack(\"B\", given_buffer[0]) == NULL_CHAR:\n # We'll be expecting a response if we know we're a client side command\n is_response = bool(self._is_client_side)\n cmd_type, cmd_args, cmd_len = parse_binary_command(given_buffer, is_response=is_response)\n else:\n cmd_type, cmd_args, cmd_len = parse_text_command(given_buffer)\n\n if _DEBUG_MODE_ and cmd_type is not None:\n gearman_logger.debug('%s - Recv - %s - %r', hex(id(self)), get_command_name(cmd_type), cmd_args)\n\n return cmd_type, cmd_args, cmd_len",
"def construct_dns_request(request: APIGatewayV2HTTPEvent) -> QueryMessage:\n encoded_body = extract_body(request)\n body = decode(encoded_body)\n return from_wire(body)",
"def recvRequest(self):\n\t\tself.rBuf=self.s.recv(self.size)",
"def parse_request(received: bytearray) -> HTTPRequest:\n raw_request_line, *_ = received.partition(b\"\\r\\n\")\n request_line = str(raw_request_line, \"iso-8859-1\")\n\n try:\n raw_method, raw_target, version = request_line.split()\n except ValueError:\n raise HTTPException(HTTPStatus.BAD_REQUEST)\n\n try:\n method = HTTPMethod[raw_method]\n except KeyError:\n raise HTTPException(HTTPStatus.METHOD_NOT_ALLOWED)\n\n return HTTPRequest(method=method, target=urllib.parse.unquote(raw_target))",
"def ParseRPCReq(self, msgid: RpcMsgType, q: Queue):\n self.logger.info(f'Got RPC msg id: {msgid}')\n\n if msgid == RpcMsgType.TYPE_NODE_INFO:\n rsp = self.GetBasicNodeStats()\n q.put(rsp)\n elif msgid == RpcMsgType.TYPE_SCHEDULER_INFO:\n rsp = self.failed_schedule_count\n q.put(rsp)\n elif msgid == RpcMsgType.TYPE_POD_INFO:\n rsp = self.GetPodStats()\n q.put(rsp)",
"def waitForType1(self):\n\n self.log(\"Waiting for TYPE1 message...\", \"server\")\n self.request, self.requestSize = self.com.getData(16, 1)\n\n if self.requestSize != 0:\n self.responseMsgType = int.from_bytes(self.request[:1], \"little\")\n self.responseServerAddress = int.from_bytes(self.request[1:2], \"little\")\n\n if self.responseServerAddress == self.address:\n self.numberOfPackets = int.from_bytes(self.request[2:5], 'little')\n self.fileExtension = self.request[6:11].decode(\"UTF-8\").replace(\"@\", \"\")\n \n self.fileName = \"file\"\n self.file = bytes()\n self.currentPacket = 0\n self.expectedPacket = 1\n\n self.idle = False\n\n self.log(\"Received TYPE1.\", \"server\")\n else:\n self.log(\"TYPE1 message not addressed to this server.\", \"server\")\n\n self.com.rx.clearBuffer()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Executes a peer request and prepares the response message sent back to the peer. Supports the request methods Register, Leave, PQuery, and KeepAlive, and takes action according to the requested method. After the request is completed, it calls another helper function that generates the response message by encapsulating the response data into the P2PDI/1.0 protocol. | def execute_request(method, host, port, cookie):
try:
if method == 'REGISTER':
if cookie is None:
            # The peer has never registered before.
assert len(dict_peers) not in dict_peers, \
'Error: Cookie for the new peer is in use.'
# Add the peer to the list and assign cookie to it.
peer = Peer(host, port, cookie=len(dict_peers), flag=True)
dict_peers[len(dict_peers)] = peer
# Call helper function to prepare response message.
response_message = encapsulate_data_protocol(
201, 'Created', cookie=peer.cookie)
else:
# Peer has registered previously.
peer = dict_peers.get(cookie)
peer.register_update(port)
# Call helper function to prepare response message.
response_message = encapsulate_data_protocol(200, 'OK',
cookie=peer.cookie)
elif method == 'LEAVE':
peer = dict_peers.get(cookie)
# Update peer's information per leave request.
peer.leave_update()
# Call helper function to prepare response message.
response_message = encapsulate_data_protocol(200, 'OK')
return response_message
elif method == 'PQUERY':
if cookie is None:
# Not legal since peer is not registered.
response_message = encapsulate_data_protocol(
403, 'Forbidden [Peer is NOT register with the RS]')
else:
peer = dict_peers.get(cookie)
peer.is_active()
if not peer.flag:
# TTL of the peer is expired.
response_message = encapsulate_data_protocol(
403, 'Forbidden [Peer is NOT register with the RS]')
else:
# Get all active peer information ready to send to peer.
list_active_peers = []
for key, active_peer in dict_peers.iteritems():
if active_peer.flag and cookie != active_peer.cookie:
dict_active_peer = dict([(active_peer.hostname,
active_peer.port)])
list_active_peers.append(dict_active_peer)
if list_active_peers:
# Call helper function to prepare response message.
response_message = encapsulate_data_protocol(
302, 'Found', list_active_peers=list_active_peers)
else:
# No active peers found.
response_message = encapsulate_data_protocol(
404, 'Not Found [No other active peers in the '
'P2P-DI system found]')
elif method == 'KEEPALIVE':
peer = dict_peers.get(cookie)
peer.update()
# Call helper function to prepare response message.
response_message = encapsulate_data_protocol(200, 'OK',
cookie=peer.cookie)
else:
            # Unsupported request method.
# Call helper function to prepare response message.
response_message = encapsulate_data_protocol(400, 'Bad Request')
return response_message
except Exception as _e:
print _e.__doc__
print type(_e).__name__
print _e.message
response_message = encapsulate_data_protocol(
404, 'Not Found [Peer is NOT register with the RS]')
return response_message | [
"def xmlrpc_dispatch(self, request):\r\n try:\r\n params, method = xmlrpclib.loads(request.raw_post_data)\r\n if method != 'pingback.ping':\r\n raise Exception('Method \"%s\" not supported' % method)\r\n source_uri, target_uri = params\r\n response = self.register_ping(source_uri, target_uri)\r\n response = (response,)\r\n response = xmlrpclib.dumps(response, methodresponse=1,\r\n allow_none=0, encoding='utf-8')\r\n except xmlrpclib.Fault, fault:\r\n response = xmlrpclib.dumps(fault, allow_none=0, encoding='utf-8')\r\n except:\r\n import sys\r\n exc_type, exc_value, exc_tb = sys.exc_info()\r\n response = xmlrpclib.dumps(\r\n xmlrpclib.Fault(1, '%s:%s' % (exc_type, exc_value)),\r\n encoding='utf-8', allow_none=0,\r\n )\r\n return response",
"def execute(self, request: PlcRequest) -> Awaitable[PlcResponse]:\n pass",
"def do_request(self, method, params=None):\n request_json = {\n 'jsonrpc':'2.0',\n 'method': method,\n 'params': params or {},\n 'auth': self.auth,\n 'id': '1',\n }\n\n logger.debug('urllib2.Request({0}, {1})'.format(self.url,json.dumps(request_json)))\n req = urllib2.Request(self.url, json.dumps(request_json))\n req.get_method = lambda: 'POST'\n req.add_header('Content-Type', 'application/json-rpc')\n\n try:\n res = urllib2.urlopen(req)\n response_json = json.load(res)\n except ValueError:\n raise ZabbixAPIException(\"Unable to parse json: %\" % res)\n\n logger.debug(\"Response Body: %s\" % json.dumps(response_json, indent=4,\n separators=(',', ': ')))\n\n if 'error' in response_json:\n msg = \"Error {code}: {message}, {data} while sending {json}\".format(\n code=response_json['error']['code'],\n message=response_json['error']['message'],\n data=response_json['error']['data'],\n json=str(request_json)\n )\n raise ZabbixAPIException(msg, response_json['error']['code'])\n\n return response_json",
"def SendCommandResponse(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _call(self, method, params=None, request_id=None):\n params = params or []\n\n # Determines which 'id' value to use and increment the counter associated with the current\n # client instance if applicable.\n rid = request_id or self._id_counter\n if request_id is None:\n self._id_counter += 1\n\n # Prepares the payload and the headers that will be used to forge the request.\n payload = {'jsonrpc': '2.0', 'method': method, 'params': params, 'id': rid}\n headers = {'Content-Type': 'application/json'}\n scheme = 'https' if self.tls else 'http'\n url = '{}://{}:{}'.format(scheme, self.host, self.port)\n\n # Calls the JSON-RPC endpoint!\n try:\n response = self.session.post(url, headers=headers, data=json.dumps(payload))\n response.raise_for_status()\n except HTTPError:\n raise TransportError(\n 'Got unsuccessful response from server (status code: {})'.format(\n response.status_code),\n response=response)\n\n # Ensures the response body can be deserialized to JSON.\n try:\n response_data = response.json()\n except ValueError as e:\n raise ProtocolError(\n 'Unable to deserialize response body: {}'.format(e), response=response)\n\n # Properly handles potential errors.\n if response_data.get('error'):\n code = response_data['error'].get('code', '')\n message = response_data['error'].get('message', '')\n raise ProtocolError(\n 'Error[{}] {}'.format(code, message), response=response, data=response_data)\n elif 'result' not in response_data:\n raise ProtocolError(\n 'Response is empty (result field is missing)', response=response,\n data=response_data)\n\n return response_data['result']",
"def handle(self, data):\n with open('request.bin', 'wb') as fout:\n fout.write(data)\n msg = dns.message.from_wire(data)\n log.debug('[REQUEST]\\n%s\\n[/REQUEST]', str(msg))\n nameservers = self.config.default\n if len(msg.question) > 1:\n log.warning(\"Warning: multi-question messages \" +\\\n \"are not yet supported. Using default nameserver.\")\n return self.forward_request(msg, nameservers).to_wire()\n question = msg.question[0]\n log.info('%-10s%-8s%s', 'Question:', msg.id, str(question))\n if question.rdtype == dns.rdatatype.A:\n name = question.name.to_text()\n ipaddr, nameservers = self.resolve_by_config(name)\n if ipaddr:\n response = self.create_response(ipaddr, msg)\n log.info('%-10s%-8s%s DNS: %s', 'Answer:', response.id, map(str, response.answer), '[* STATIC IP *]')\n with open('response.bin', 'wb') as fout:\n fout.write(response.to_wire())\n return response.to_wire()\n\n # let some nameserver handle the message\n response = self.forward_request(msg, nameservers)\n log.debug('[RESPONSE]\\n%s\\n[/RESPONSE]', str(response))\n log.info('%-10s%-8s%s DNS: %r', 'Answer:', response.id, map(str, response.answer), nameservers)\n return response.to_wire()",
"def SendRequestToJoinServer(self):\n \n self.log.WriteLine('Requesting to join server (%s)' % (self.peerStr))\n #p = self.rUDP.CreateNewPacket(self.serverAddr, Packet.PC_REQUEST_JOIN)\n p = self.uUDP.CreateNewPacket(Packet.PC_REQUEST_JOIN)\n p.AddFixedString(Settings.NAME)\n p.AddFixedString(Globals.VERSION)\n self.uUDP.SendPacket(p, self.serverAddr)\n self.rUDP.CreatePeerData(self.serverAddr)\n self.rUDP.SetCurrentPeer(self.serverAddr)\n #self.rUDP.SendPacket(p, self.serverAddr)",
"def call(self, request):\n return self.wait(self.send(request))",
"def run(self):\n # Loop forever waiting for new connections from different peers\n while True:\n # Wait on accept and create new socket\n try:\n connection_socket, address = server_socket.accept()\n except error:\n print 'Shuts down the TCP Register Server welcoming socket...'\n exit()\n # Read peer's request data from socket\n message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)\n request_data = message_chunk\n while len(message_chunk) == MAX_BUFFER_SIZE:\n message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)\n request_data += message_chunk\n print '\\n', request_data.decode()\n try:\n assert PROTOCOL_EOP in request_data.decode(), \\\n 'Exception: Undefined App Layer Protocol..'\n # Obtain response message by extracting request protocol\n response_message = extract_data_protocol(request_data.decode())\n connection_socket.send(response_message.encode())\n except AssertionError, _e:\n print _e\n connection_socket.close()\n del connection_socket",
"def respond(self, follow_up=True, **kwargs):\n if self['type'] == 'response':\n assert self['done'] == False, \"Can't respond to a response that is already marked done.\"\n data = {'command_id': self['command_id'], 'type':'response'}\n data.update(kwargs)\n if not data.has_key('done'):\n data['done'] = False\n data_str = json.dumps(data, cls=JSONEncoder)\n log.debug(\"Sending response : {data}\".format(data=data_str))\n self.ws.send(data_str)\n if data['done'] == False and follow_up:\n # We are still expecting a response to our response:\n return self.receive()",
"def handle_connect(self, req):\r\n \r\n # Create a socket to connect to the remote server\r\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # From now on, we must not forget to close this socket before leaving.\r\n try:\r\n try:\r\n # Connection to the remote server\r\n print thread.get_ident(), 'Connecting to', req['address']\r\n\r\n\r\n # Possible way to handle the timeout defined in the protocol!\r\n # Make the connect non-blocking, then do a select and keep\r\n # an eye on the writable socket, just as I did with the\r\n # accept() from BIND requests.\r\n # Do this tomorrow... Geez... 00:47... Do this this evening.\r\n \r\n remote.connect(req['address'])\r\n \r\n # The only connection that can be reset here is the one of the\r\n # client, so we don't need to answer. Any other socket\r\n # exception forces us to try to answer to the client.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Client_Connection_Closed((ERR_CONNECTION_RESET_BY_PEER, socket.errorTab[ERR_CONNECTION_RESET_BY_PEER]))\r\n else:\r\n raise Remote_Connection_Failed\r\n except:\r\n raise Remote_Connection_Failed\r\n \r\n # From now on we will already have answered to the client.\r\n # Any exception occuring now must make us exit silently.\r\n try:\r\n # Telling the client that the connection it asked for is\r\n # granted.\r\n self.answer_granted()\r\n # Starting to relay information between the two peers.\r\n self.forward(self.request, remote)\r\n # We don't have the right to \"speak\" to the client anymore.\r\n # So any socket failure means a \"connection closed\" and silent\r\n # exit.\r\n except socket.error:\r\n raise Connection_Closed\r\n # Mandatory closing of the remote socket.\r\n finally:\r\n remote.close()",
"async def execute(self, request: JsonRpcRequest) -> typing.Any:\n send_channel, recv_channel = trio.open_memory_channel(1)\n await self.handle_request(request, send_channel)\n _, result = await recv_channel.receive()\n print(\"execute\", result)\n if isinstance(result, JsonRpcException):\n raise result\n return result",
"def iq_processor(self, elem: domish.Element):\n typ = elem.getAttribute(\"type\")\n # A response is being requested of us.\n if typ == \"get\" and elem.firstChildElement().name == \"ping\":\n # Respond to a ping request.\n pong = domish.Element((None, \"iq\"))\n pong[\"type\"] = \"result\"\n pong[\"to\"] = elem[\"from\"]\n pong[\"from\"] = elem[\"to\"]\n pong[\"id\"] = elem[\"id\"]\n self.xmlstream.send(pong)\n # We are getting a response to a request we sent, maybe.\n elif typ == \"result\":\n if elem.getAttribute(\"id\") in self.outstanding_pings:\n self.outstanding_pings.remove(elem.getAttribute(\"id\"))",
"def handle( self, msg ):\n\t\tPROTOCOL.info( 'Received UMCP %s REQUEST %s' % ( msg.command, msg.id ) )\n\t\tif msg.command == 'EXIT':\n\t\t\tshutdown_timeout = 100\n\t\t\tMODULE.info( \"EXIT: module shutdown in %dms\" % shutdown_timeout )\n\t\t\t# shutdown module after one second\n\t\t\tresp = Response( msg )\n\t\t\tresp.body = { 'status': 'module %s will shutdown in %dms' % (str(msg.arguments[0]), shutdown_timeout) }\n\t\t\tresp.status = SUCCESS\n\t\t\tself.response( resp )\n\t\t\tself.__timer = notifier.timer_add( shutdown_timeout, self._timed_out )\n\t\t\treturn\n\n\t\tif msg.command == 'SET':\n\t\t\tresp = Response( msg )\n\t\t\tresp.status = SUCCESS\n\t\t\tfor key, value in msg.options.items():\n\t\t\t\tif key == 'acls':\n\t\t\t\t\tself.__acls = ACLs( acls = value )\n\t\t\t\t\tself.__handler.acls = self.__acls\n\t\t\t\telif key == 'commands':\n\t\t\t\t\tself.__commands.fromJSON( value[ 'commands' ] )\n\t\t\t\telif key == 'username':\n\t\t\t\t\tself.__username = value\n\t\t\t\t\tself.__handler.username = self.__username\n\t\t\t\telif key == 'credentials':\n\t\t\t\t\tself.__username = value[ 'username' ]\n\t\t\t\t\tself.__user_dn = value[ 'user_dn' ]\n\t\t\t\t\tself.__password = value[ 'password' ]\n\t\t\t\t\tself.__handler.username = self.__username\n\t\t\t\t\tself.__handler.user_dn = self.__user_dn\n\t\t\t\t\tself.__handler.password = self.__password\n\t\t\t\telif key == 'locale' and value is not None:\n\t\t\t\t\tself.__locale = value\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlocale_obj = Locale( value )\n\t\t\t\t\t\tlocale.setlocale( locale.LC_MESSAGES, str( locale_obj ) )\n\t\t\t\t\t\tMODULE.info( \"Setting specified locale (%s)\" % str( locale_obj ) )\n\t\t\t\t\texcept locale.Error:\n\t\t\t\t\t\tMODULE.warn( \"Specified locale is not available (%s)\" % str( locale_obj ) )\n\t\t\t\t\t\tMODULE.warn( \"Falling back to C\" )\n\t\t\t\t\t\t# specified locale is not available -> falling back to C\n\t\t\t\t\t\tlocale.setlocale( locale.LC_MESSAGES, 'C' )\n\t\t\t\t\t\tself.__locale = 'C'\n\t\t\t\t\tself.__handler.set_language( self.__locale )\n\t\t\t\telse:\n\t\t\t\t\tresp.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\t\tbreak\n\n\t\t\t# if SET command contains 'acls', commands' and\n\t\t\t# 'credentials' it is the initialization of the module\n\t\t\t# process\n\t\t\tif 'acls' in msg.options and 'commands' in msg.options and 'credentials' in msg.options:\n\t\t\t\ttry:\n\t\t\t\t\tself.__handler.init()\n\t\t\t\texcept BaseException, e:\n\t\t\t\t\timport traceback, sys\n\t\t\t\t\tresp.status = MODULE_ERR\n\t\t\t\t\texc_info = sys.exc_info()\n\t\t\t\t\tresp.message = _( 'The init function of the module has failed: %s: %s\\n%s' ) % ( exc_info[ 0 ].__name__, exc_info[ 1 ], '\\n'.join( traceback.format_tb( exc_info[ 2 ] ) ) )\n\t\t\tself.response( resp )\n\n\t\t\tif not self.__active_requests and self.__timer == None:\n\t\t\t\tself.__timer = notifier.timer_add( self.__timeout, self._timed_out )\n\t\t\treturn\n\n\t\tif msg.arguments:\n\t\t\tcmd = msg.arguments[ 0 ]\n\t\t\tcmd_obj = self.command_get( cmd )\n\t\t\tif cmd_obj and ( not self.__check_acls or self.__acls.is_command_allowed( cmd, options = msg.options, flavor = msg.flavor ) ):\n\t\t\t\tself.__active_requests += 1\n\t\t\t\tself.__handler.execute( cmd_obj.method, msg )\n\t\t\t\tif not self.__active_requests and self.__timer == None:\n\t\t\t\t\tself.__timer = notifier.timer_add( self.__timeout, self._timed_out )\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tresp = Response( msg )\n\t\t\t\t# status 415 (command not allowed) should be checked by the 
server\n\t\t\t\tresp.status = BAD_REQUEST_NOT_FOUND\n\t\t\t\tresp.message = status_description( resp.status )\n\t\t\t\tself.response( resp )\n\n\t\tif not self.__active_requests and self.__timer == None:\n\t\t\tself.__timer = notifier.timer_add( self.__timeout, self._timed_out )",
"def OnRequestJoinResponse(self, data):\n \n reason = ''\n response = data.getBool()\n if(response):\n Globals.MY_PID = data.getUint8()\n print 'Can join server', Globals.MY_PID\n self.SendRequestForEnvironment()\n else:\n reason = Packet.GetFixedString(data)\n self.rUDP.RemovePeerData(self.serverAddr)\n ServerJoinResponseEvent(response, reason).Fire()",
"def response( self, msg ):\n\t\tPROTOCOL.info( 'Sending UMCP RESPONSE %s' % msg.id )\n\t\tdata = str( msg )\n\t\tself.__queue += str(msg)\n\n\t\tif self._do_send( self.__comm ):\n\t\t\tnotifier.socket_add( self.__comm, self._do_send, notifier.IO_WRITE )",
"def onRequestPeers(self):\n selection = self.peerList.curselection()\n if len(selection) == 1:\n peerid = self.peerList.get(selection[0])\n self.btpeer.sendtopeer( peerid, GETPEERS, \"%s\" % ( self.btpeer.myid) )",
"def test_sending_and_accepting_request(self):\n\n self.send_request()\n\n request_response_id = RequestResponse.list(\n self._API_CONTEXT,\n self._USER_ID,\n self._MONETARY_ACCOUNT_ID2\n ).value[self._FIRST_INDEX].id_\n\n self.accept_request(request_response_id)",
"def do_POST(self):\n\t\ttry:\n\t\t\t# get arguments\n\t\t\tdata = self.rfile.read(int(self.headers[\"content-length\"]))\n\t\t\t# In previous versions of SimpleXMLRPCServer, _dispatch\n\t\t\t# could be overridden in this class, instead of in\n\t\t\t# SimpleXMLRPCDispatcher. To maintain backwards compatibility,\n\t\t\t# check to see if a subclass implements _dispatch and dispatch\n\t\t\t# using that method if present.\n\t\t\tresponse = self.server._marshaled_dispatch(data, getattr(self, '_dispatch', None))\n\t\texcept: # This should only happen if the module is buggy\n\t\t\t# internal error, report as HTTP server error\n\t\t\tself.send_response(500)\n\t\t\tself.end_headers()\n\t\telse:\n\t\t\t# got a valid XML RPC response\n\t\t\tself.send_response(200)\n\t\t\tself.send_header(\"Content-type\", \"text/xml\")\n\t\t\tself.send_header(\"Content-length\", str(len(response)))\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(response)\n\n\t\t\t# shut down the connection\n\t\t\tself.wfile.flush()\n\t\t\tself.connection.shutdown() # Modified here!"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Displays/prints all peers that have registered with this Register Server. The request comes from the user while the server continuously prompts for input. The output displays all available information about each registered peer. | def do_show():
if dict_peers:
print 'Show: Each Registered Peer Information...'
for key, peer in dict_peers.iteritems():
peer.is_active()
print key, ' ==> ', 'Hostname: {} '.format(peer.hostname), \
'Port: {} (RFC Server) '.format(peer.port), \
'Cookie: {} '.format(peer.cookie), \
'Flag: {} '.format(peer.flag), \
'TTL: {} '.format(int(peer.ttl)), \
'Most Recent Registration Date: {} '.format(peer.reg_date), \
'Times host been registered for last 30 days: {} '.format(
len(peer.reg_times))
else:
print 'No Registered Peers are found' | [
"def onRequestPeers(self):\n selection = self.peerList.curselection()\n if len(selection) == 1:\n peerid = self.peerList.get(selection[0])\n self.btpeer.sendtopeer( peerid, GETPEERS, \"%s\" % ( self.btpeer.myid) )",
"def fetch_peers():\n get_chain_address = \"{}/chain\".format(CONNECTED_NODE_ADDRESS)\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for peer in chain[\"peers\"]:\n content.append(peer)\n\n global peers\n peers = sorted(content)",
"def get_peerlist():\n response = None\n for seed in SEED_NODES:\n url = \"http://%s/staeon/peerlist?top\" % seed\n print(url)\n try:\n response = requests.get(url).json()\n except (requests.exceptions.ConnectionError, ValueError) as exc:\n print(exc)\n continue\n break\n\n if not response:\n raise Exception(\"Can't get peerlist\")\n\n return response['peers']",
"def register_nodes():\n values = request.get_json()\n nodes = values.get('nodes') # get from the input of Postman\n if nodes is None:\n return \"Error: Please supply a valid list of nodes\", 400\n \n for node in nodes:\n my_wallet.peer_register(node)\n \n response = {\n 'message': 'New peer nodes have been added',\n 'total_nodes': list(my_wallet.peers),\n }\n return jsonify(response), 201",
"def get_peers(self):\n pass",
"def suggest_peers(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def test_peer_info(self):\n #clear the peers list. \n tracker.info_hash_to_peers.clear()\n\n #send first request from one peer\n send_test_params(TEST_DEFAULTS())\n\n #send second params with a different peer_id to get the first peer back\n params = TEST_DEFAULTS()\n params[b\"peer_id\"] = \"TESTPEERID2\"\n result = send_test_params(params)\n \n #check we got the first peer back\n self.assertTrue(len(result[b\"peers\"]) == 1)\n peer = result[b\"peers\"][0]\n self.assertTrue(peer)\n \n #check we got the right info back\n self.assertTrue(peer[b\"peer id\"] == b\"TESTPEERID\")\n self.assertTrue(peer[b\"ip\"] == b\"127.0.0.1\")\n self.assertTrue(peer[b\"port\"] == 8001)\n self.assertTrue(len(peer) == 3)",
"def peers_tracker(self):\r\n\r\n # retrieves both the announce and the announce list structure from\r\n # the current info dictionary and uses both of them to create the\r\n # final list containing the various addresses of trackers, then\r\n # iterates over each of the trackers to retrieve the information\r\n # about the various peers associated with the torrent file\r\n announce = self.info.get(\"announce\", None)\r\n announce_list = self.info.get(\"announce-list\", [[announce]])\r\n for tracker in announce_list:\r\n # retrieves the first element of the tracker structure as the\r\n # url of it and then verifies that it references an http based\r\n # tracker (as that's the only one supported)\r\n tracker_url = tracker[0]\r\n is_http = tracker_url.startswith((\"http://\", \"https://\"))\r\n if not is_http: continue\r\n\r\n # runs the get http retrieval call (blocking call) so that it's\r\n # possible to retrieve the contents for the announce of the tracker\r\n # this is an asynchronous call and the on tracker callback will be\r\n # called at the end of the process with the message\r\n self.owner.http_client.get(\r\n tracker_url,\r\n params = dict(\r\n info_hash = self.info_hash,\r\n peer_id = self.owner.peer_id,\r\n port = 6881,\r\n uploaded = self.uploaded,\r\n downloaded = self.downloaded,\r\n left = self.left(),\r\n compact = 1,\r\n no_peer_id = 0,\r\n event = \"started\",\r\n numwant = 50\r\n ),\r\n on_result = self.on_tracker\r\n )\r\n\r\n # prints a debug message about the request for peer that was just\r\n # performed in order to provide some debugging information\r\n self.owner.debug(\"Requested peers using '%s'\" % tracker_url)",
"def run(self):\n # Loop forever waiting for new connections from different peers\n while True:\n # Wait on accept and create new socket\n try:\n connection_socket, address = server_socket.accept()\n except error:\n print 'Shuts down the TCP Register Server welcoming socket...'\n exit()\n # Read peer's request data from socket\n message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)\n request_data = message_chunk\n while len(message_chunk) == MAX_BUFFER_SIZE:\n message_chunk = connection_socket.recv(MAX_BUFFER_SIZE)\n request_data += message_chunk\n print '\\n', request_data.decode()\n try:\n assert PROTOCOL_EOP in request_data.decode(), \\\n 'Exception: Undefined App Layer Protocol..'\n # Obtain response message by extracting request protocol\n response_message = extract_data_protocol(request_data.decode())\n connection_socket.send(response_message.encode())\n except AssertionError, _e:\n print _e\n connection_socket.close()\n del connection_socket",
"def broadcast_peers(self):\n global peers_online\n\n msg_peers_online = '[SERVER]: PEERS_ONLINE::'\n for i in peers_online:\n peer_online, port_incoming = re.split('/', i)\n msg_peers_online += str(peer_online) + ', '\n msg_peers_online = msg_peers_online[:-2]\n\n print(str(msg_peers_online))\n for ii in connections:\n if '[closed]' not in str(ii):\n # Compare ip and port against current connections\n # if str(addr) == str(ii[1]):\n conn = ii[0]\n\n msg = self.encode_msg(msg_peers_online)\n time.sleep(0.3)\n\n conn.send(msg)",
"def list_network(self):\n self.gui_input_queue.put((ChatTypes.NETWORK, self.usernames))",
"def list_registered_players(self):\n req_registered_players = self.player_connect.method.get_registered_players()\n players = req_registered_players()\n result = \"list_registered_players::> There aren't any registered players at this time...\"\n if players:\n print \"list_registered_players::> \"\n self.print_list(players)\n return\n print result",
"def incoming_peers(self):\n registrations = {a for a in self.actions.filter(include={Receive}) if a.replicative}\n return {peer: registrations.intersection(signals) for peer, signals in self.peers.items()\n if registrations.intersection(signals)}",
"def peers_handler(self, message):\n for peer_id in message.data:\n host, port = peer_id.split(':')\n self.add_peer(host, port)\n\n if not self.__recently_received_order('PEERS'):\n time.sleep(random.randint(1, 10))\n self.send_hello()",
"def get_peers(self, **kwargs):\r\n # TODO: This should probably not be in admin. However listing peers does seems slightly administrative.\r\n _result = []\r\n # Filter out the unserializable web socket\r\n for _session in self.root.peers.values():\r\n _new_session = copy.copy(_session)\r\n _new_session[\"web_socket\"] = \"removed for serialization\"\r\n _new_session[\"queue\"] = \"removed for serialization\"\r\n _result.append(_new_session)\r\n\r\n write_to_log(_process_id=self.process_id, _category=EC_NOTIFICATION, _severity=SEV_DEBUG,\r\n _data=\"Returning a list of peers:\" + str(_result))\r\n return _result",
"def test_get_peers_0(self):\n pass",
"def list_connected(self):\n client_macs = [client.mac for client in self.blue_node.clients.get_connected_clients()]\n self.connected_nodes = {key: value for key, value in self.usernames.items() if key in client_macs}\n self.gui_input_queue.put((ChatTypes.NETWORK, self.connected_nodes))",
"async def background_peers(self):\n while True:\n self.config.app_log.debug(\"background_peers\")\n try:\n await self.config.peer.ensure_peers_connected()\n self.config.health.peer.last_activity = int(time())\n except:\n self.config.app_log.error(format_exc())\n\n await tornado.gen.sleep(self.config.peers_wait)",
"def test_numpeers(self):\n #clear the peers list. \n tracker.info_hash_to_peers.clear()\n #add 49 peers\n for i in range(49):\n params = TEST_DEFAULTS()\n params[\"peer_id\"] += str(i)\n send_test_params(params)\n\n #send without a started event\n params = TEST_DEFAULTS()\n del params[\"event\"]\n result = send_test_params(params)\n\n #check we got 49 peers back\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 49)\n\n #add another peer\n params = TEST_DEFAULTS()\n params[\"peer_id\"] += str(50)\n send_test_params(params)\n\n #send without a started event\n params = TEST_DEFAULTS()\n del params[\"event\"]\n result = send_test_params(params)\n\n #check we got 50 peers back\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 50)\n\n #set numwant to 25, & check we get 25 peers back\n params = TEST_DEFAULTS()\n params[\"numwant\"] = 25\n del params[\"event\"]\n result = send_test_params(params)\n peers = result[b\"peers\"]\n self.assertTrue(len(peers) == 25)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts the gluster volume | def volume_start(mnode, volname, force=False):
data = {
"force-start-bricks": force
}
return RestClient(mnode).handle_request(
"POST", "/v1/volumes/%s/start" % volname,
httplib.OK, data) | [
"def start():\n\n brick_device = os.environ.get(\"BRICK_DEVICE\", None)\n brick_path = os.environ[\"BRICK_PATH\"]\n if brick_device is not None and brick_device != \"\":\n brickfs = os.environ.get(\"BRICK_FS\", \"xfs\")\n create_and_mount_brick(brick_device, brick_path, brickfs)\n\n volume_id = os.environ[\"VOLUME_ID\"]\n brick_path_name = brick_path.strip(\"/\").replace(\"/\", \"-\")\n volname = os.environ[\"VOLUME\"]\n nodename = os.environ[\"HOSTNAME\"]\n\n create_brickdir(brick_path)\n verify_brickdir_xattr_support(brick_path)\n set_volume_id_xattr(brick_path, volume_id)\n\n volfile_id = \"%s.%s.%s\" % (volname, nodename, brick_path_name)\n volfile_path = os.path.join(VOLFILES_DIR, \"%s.vol\" % volfile_id)\n generate_brick_volfile(volfile_path, volname)\n\n # UID is stored at the time of installation in configmap.\n uid = None\n with open(os.path.join(VOLINFO_DIR, \"uid\")) as uid_file:\n uid = uid_file.read()\n\n # Send Analytics Tracker\n # The information from this analytics is available for\n # developers to understand and build project in a better way\n send_analytics_tracker(\"server\", uid)\n\n test_counter = Gauge(\"test\", \"Test Counter\")\n\n os.execv(\n \"/usr/sbin/glusterfsd\",\n [\n \"/usr/sbin/glusterfsd\",\n \"-N\",\n \"--volfile-id\", volfile_id,\n \"-p\", \"/var/run/gluster/glusterfsd-%s.pid\" % brick_path_name,\n \"-S\", \"/var/run/gluster/brick.socket\",\n \"--brick-name\", brick_path,\n \"-l\", \"-\", # Log to stderr\n \"--xlator-option\",\n \"*-posix.glusterd-uuid=%s\" % os.environ[\"NODEID\"],\n \"--process-name\", \"brick\",\n \"--brick-port\", \"24007\",\n \"--xlator-option\",\n \"%s-server.listen-port=24007\" % volname,\n \"-f\", volfile_path\n ]\n )",
"def mount(volname):\n mnt = tempfile.mkdtemp(prefix=\"georep_\")\n execute([\"mount\", \"-t\", \"glusterfs\",\n \"localhost:/%s\" % volname, mnt])\n yield mnt\n execute([\"umount\", \"-l\", mnt])",
"def start_volume( mdserver_name ):\n \n global conf\n \n ctl_root = VOLUME_CTL_ROOT( conf, {'NAME': mdserver_name} )\n config_file = VOLUME_CONF_PATH( ctl_root )\n md_logfile = LOGFILE_PATH( ctl_root )\n md_pidfile = PIDFILE_PATH( ctl_root )\n \n # Get this server's configuration file\n try:\n md_conf = read_config( config_file )\n except Exception, e:\n raise MDMethodFailed( 'start_volume', \"read config exception = '%s'\" % e )\n \n # make sure we're not running...\n if is_volume_running( md_pidfile ):\n return 1\n\n try:\n assert os.path.isdir( ctl_root ), \"Control directory does not exist\"\n assert os.path.isfile( config_file ), \"Config file does not exist\" \n assert os.path.isdir( md_conf['MDROOT'] ), \"Master copy '%s' does not exist\" % (md_conf['MDROOT'])\n except AssertionError, e:\n raise MDInternalError( \"Server is not fully set up: %s\" % str(e) )\n \n try:\n assert not os.path.isfile( md_pidfile )\n except AssertionError, e:\n raise MDInternalError( \"Server is already running\" )\n \n # fire up the binary\n md_proc = subprocess.Popen( [conf['MD_BINARY'], '-c', config_file, '-l', md_logfile ], close_fds = True )\n rc = md_proc.wait()\n \n if rc != 0:\n # could not start the server\n # make sure we remove runtime files, just in case\n\n try:\n os.unlink( md_pidfile )\n except:\n pass\n \n raise MDMethodFailed( \"start_volume\", \"rc = %s when starting metadata server\" % rc )\n \n return 1",
"def test_volume_start(self):\n pass",
"def start() -> int:\n result = os.system(HDFS_handler.START_HDFS)\n time.sleep(HDFS_handler.MIN_START_TIME)\n return result",
"def start_glusterd(servers):\n if isinstance(servers, str):\n servers = [servers]\n\n cmd = \"pgrep glusterd || service glusterd start\"\n results = g.run_parallel(servers, cmd)\n\n _rc = True\n for server, ret_values in results.iteritems():\n retcode, _, _ = ret_values\n if retcode != 0:\n g.log.error(\"Unable to start glusterd on server %s\", server)\n _rc = False\n if not _rc:\n return False\n\n return True",
"def start(args):\n\n # pylint: disable=too-many-locals\n # Pylint doesn't want more than 15 local variables in a function; this one has 17. This is about\n # as low as I want to go because, while I can cheat and stuff unrelated things in a dictionary,\n # that won't improve readability.\n\n uuid = str(uuid4())\n container_cluster_config_dir = join(CLUSTERDOCK_VOLUME, uuid, 'config')\n makedirs(container_cluster_config_dir)\n\n for mount in client.inspect_container(get_clusterdock_container_id())['Mounts']:\n if mount['Destination'] == CLUSTERDOCK_VOLUME:\n host_cluster_config_dir = join(mount['Source'], uuid, 'config')\n break\n else:\n raise Exception(\"Could not find source of {0} mount.\".format(CLUSTERDOCK_VOLUME))\n\n # CLUSTERDOCK_VOLUME/uuid/config in the clusterdock container corresponds to\n # host_cluster_config_dir on the Docker host.\n logger.debug(\"Creating directory for cluster configuration files in %s...\",\n host_cluster_config_dir)\n\n # Generate the image name to use from the command line arguments passed in.\n image = '/'.join(\n [item\n for item in [args.registry_url, args.namespace or DEFAULT_APACHE_NAMESPACE,\n \"clusterdock:{os}_java-{java}_hadoop-{hadoop}_hbase-{hbase}\".format(\n os=args.operating_system, java=args.java_version,\n hadoop=args.hadoop_version, hbase=args.hbase_version\n )]\n if item]\n )\n if args.always_pull or not is_image_available_locally(image):\n pull_image(image)\n\n # Before starting the cluster, we create a throwaway container from which we copy\n # configuration files back to the host. We also use this container to run an HBase\n # command that returns the port of the HBase master web UI. Since we aren't running init here,\n # we also have to manually pass in JAVA_HOME as an environmental variable.\n get_hbase_web_ui_port_command = ('/hbase/bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool '\n 'hbase.master.info.port')\n container_id = client.create_container(image=image, command=get_hbase_web_ui_port_command,\n environment={'JAVA_HOME': '/java'})['Id']\n logger.debug(\"Created temporary container (id: %s) from which to copy configuration files.\",\n container_id)\n\n # Actually do the copying of Hadoop configs...\n _copy_container_folder_to_host(container_id, '/hadoop/etc/hadoop',\n join(container_cluster_config_dir, 'hadoop'),\n join(host_cluster_config_dir, 'hadoop'))\n\n # ... 
and repeat for HBase configs.\n _copy_container_folder_to_host(container_id, '/hbase/conf',\n join(container_cluster_config_dir, 'hbase'),\n join(host_cluster_config_dir, 'hbase'))\n\n logger.info(\"The /hbase/lib folder on containers in the cluster will be volume mounted \"\n \"into %s...\", join(host_cluster_config_dir, 'hbase-lib'))\n _copy_container_folder_to_host(container_id, '/hbase/lib',\n join(container_cluster_config_dir, 'hbase-lib'),\n join(host_cluster_config_dir, 'hbase-lib'))\n\n # Every node in the cluster will have a shared volume mount from the host for Hadoop and HBase\n # configuration files as well as the HBase lib folder.\n shared_volumes = [{join(host_cluster_config_dir, 'hadoop'): '/hadoop/etc/hadoop'},\n {join(host_cluster_config_dir, 'hbase'): '/hbase/conf'},\n {join(host_cluster_config_dir, 'hbase-lib'): '/hbase/lib'}]\n\n # Get the HBase master web UI port, stripping the newline the Docker REST API gives us.\n client.start(container=container_id)\n if client.wait(container=container_id) == EX_OK:\n hbase_master_web_ui_port = client.logs(container=container_id).rstrip()\n client.remove_container(container=container_id, force=True)\n else:\n raise Exception('Failed to remove HBase configuration container.')\n\n # Create the Node objects. These hold the state of our container nodes and will be started\n # at Cluster instantiation time.\n primary_node = Node(hostname=args.primary_node[0], network=args.network,\n image=image, ports=[NAMENODE_WEB_UI_PORT,\n hbase_master_web_ui_port,\n RESOURCEMANAGER_WEB_UI_PORT,\n HBASE_REST_SERVER_PORT],\n volumes=shared_volumes)\n secondary_nodes = []\n for hostname in args.secondary_nodes:\n # A list of service directories will be used to name folders on the host and, appended\n # with an index, in the container, as well (e.g. /data1/node-1/dfs:/dfs1).\n service_directories = ['dfs', 'yarn']\n\n # Every Node will have shared_volumes to let one set of configs on the host be propagated\n # to every container. 
If --data-directories is specified, this will be appended to allow\n # containers to use multiple disks on the host.\n volumes = shared_volumes[:]\n if args.data_directories:\n data_directories = args.data_directories.split(',')\n volumes += [{join(data_directory, uuid, hostname, service_directory):\n \"/{0}{1}\".format(service_directory, i)}\n for i, data_directory in enumerate(data_directories, start=1)\n for service_directory in service_directories]\n secondary_nodes.append(Node(hostname=hostname,\n network=args.network,\n image=image,\n volumes=volumes))\n\n Cluster(topology='apache_hbase',\n node_groups=[NodeGroup(name='primary', nodes=[primary_node]),\n NodeGroup(name='secondary', nodes=secondary_nodes)],\n network_name=args.network).start()\n\n # When creating configs, pass in a dictionary of wildcards into create_configurations_from_file\n # to transform placeholders in the configurations.cfg file into real values.\n _create_configs_from_file(filename=args.configurations,\n cluster_config_dir=container_cluster_config_dir,\n wildcards={\"primary_node\": args.primary_node,\n \"secondary_nodes\": args.secondary_nodes,\n \"all_nodes\": args.primary_node + args.secondary_nodes,\n \"network\": args.network})\n\n # After creating configurations from the configurations.cfg file, update hdfs-site.xml and\n # yarn-site.xml to use the data directories passed on the command line.\n if args.data_directories:\n _update_config_for_data_dirs(\n container_cluster_config_dir=container_cluster_config_dir,\n data_directories=data_directories\n )\n\n if not args.dont_start_services:\n _start_services(primary_node, hbase_master_web_ui_port=hbase_master_web_ui_port)",
"def start(tolerant):\n # report operation\n llecho('Activating all existing LVM volume groups')\n\n # tolerant mode on: LVM with --partial\n if tolerant == True:\n status = run('%s %s' % (CMD_START_LVM, '--partial'))\n\n # tolerant mode off: start LVM manually\n else:\n status = run(CMD_START_LVM)\n\n # cannot start volume groups: fail\n if status != 0:\n llecho('Error: cannot activate LVM volume groups')\n sys.exit(1)",
"async def do_start_cluster(self, cluster):\n raise NotImplementedError",
"def share_volume_over_smb(mnode, volname, smb_users_info):\n g.log.info(\"Start sharing the volume over SMB\")\n\n # Set volume option 'user.cifs' to 'on'.\n cmd = \"gluster volume set %s user.cifs on\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"Failed to set the volume option user.cifs on\")\n return False\n g.log.info(\"Successfully set 'user.cifs' to 'on' on %s\", volname)\n\n # Set volume option 'stat-prefetch' to 'on'.\n cmd = \"gluster volume set %s stat-prefetch on\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"Failed to set the volume option stat-prefetch on\")\n return False\n g.log.info(\"Successfully set 'stat-prefetch' to 'on' on %s\", volname)\n\n # Set volume option 'server.allow-insecure' to 'on'.\n cmd = \"gluster volume set %s server.allow-insecure on\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"Failed to set the volume option server-allow-insecure\")\n return False\n g.log.info(\"Successfully set 'server-allow-insecure' to 'on' on %s\",\n volname)\n\n # Set 'storage.batch-fsync-delay-usec' to 0.\n # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS.\n cmd = (\"gluster volume set %s storage.batch-fsync-delay-usec 0\" % volname)\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"Failed to set the volume option \"\n \"'storage.batch-fsync-delay-usec' to 0 on %s\", volname)\n return False\n g.log.info(\"Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s\",\n volname)\n\n # Verify if the volume can be accessed from the SMB/CIFS share.\n cmd = (\"smbclient -L localhost -U | grep -i -Fw gluster-%s \" % volname)\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"volume '%s' not accessible via SMB/CIFS share\", volname)\n return False\n g.log.info(\"volume '%s' can be accessed from SMB/CIFS share\", volname)\n\n # To verify if the SMB/CIFS share can be accessed by the root/non-root user\n # TBD\n\n # Enable mounting volumes over SMB\n ret = enable_mounting_volume_over_smb(mnode, volname, smb_users_info)\n if not ret:\n g.log.error(\"Failed to enable mounting volumes using SMB\")\n return False\n g.log.info(\"Successfully enabled mounting volumes using SMV for the \"\n \"smbusers: %s\", str(smb_users_info.keys()))\n\n # Verify if volume is shared\n ret = is_volume_exported(mnode, volname, \"smb\")\n if not ret:\n g.log.info(\"Volume %s is not exported as 'cifs/smb' share\", volname)\n return False\n g.log.info(\"Volume %s is exported as 'cifs/smb' share\", volname)\n\n return True",
"def run(self, nodes, master, user, user_shell, volumes):\r\n log.info(\"Running plugin: automount.NfsShares\")\r\n log.debug(\"automount.NfsShares.run Starting AutoMount...\")\r\n log.debug(\"automount.NfsShares.run self.head_ip %s\" % self.head_ip)\r\n\n #### OPEN NFS-RELATED PORTS FOR THIS CLUSTER\r\n self.openNfsPorts()\r\n\n #### FIX mountd PORT ON head AND MASTER/NODES\r\n mountdport = \"32767\"\r\n for node in nodes:\r\n self.setMountdOnNode(node, mountdport)\r\n self.setMountdOnHead(mountdport)\r\n self.restartServicesOnHead()\r\n\n #### MOUNT ON ALL NODES\r\n for node in nodes:\r\n self.mount(node)",
"def launch_new_instance():\n app = IPClusterApp()\n app.start()",
"def enable_self_heal_daemon(mnode, volname):\n cmd = \"gluster volume set %s self-heal-daemon on\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n return False\n\n return True",
"def get_gluster_on_compute_volume():\n # type: (None) -> str\n return _GLUSTER_ON_COMPUTE_VOLUME",
"def start_local(self):\n # TODO: the start procedure is not 100% reliable, change that!\n # Possibly related: https://github.com/dask/distributed/issues/1321\n\n # set memory limit to 90% of the total system memory\n mem_per_worker = int(self.memory_per_node * 0.9) / self.ntasks\n logging.debug(\"memory limit per worker: %db\" % mem_per_worker)\n\n # create the cluster by starting the client\n self.scheduler_port = get_first_free_port(self.ip_address, 8786)\n # When using a single node, the port and ip options won't be specified.\n \"\"\"\n self.cluster = distributed.LocalCluster(n_workers=self.ntasks,\n local_dir=self.local_dir,\n scheduler_port=self.scheduler_port,\n ip=self.ip_address,\n silence_logs=logging.WARN,\n threads_per_worker=1,\n memory_limit=mem_per_worker)\n \"\"\"\n self.cluster = distributed.LocalCluster(n_workers=self.ntasks,\n local_directory=self.local_dir,\n silence_logs=logging.WARN,\n threads_per_worker=1,\n memory_limit=mem_per_worker)\n self.client = distributed.Client(self.cluster)\n logging.debug(\"client and cluster started: %s\" % str(self.client))",
"def do_create_volume(sess, size, display_name, attach_it, chap_credentials, mode):\n\n try:\n _logger.info(\"Creating a new %d GB volume %s\", size, display_name)\n inst = sess.this_instance()\n if inst is None:\n raise Exception(\"OCI SDK error: couldn't get instance info\")\n _logger.debug('\\n availability_domain %s\\n compartment_id %s',\n inst.get_availability_domain_name(), inst.get_compartment_id())\n #\n # GT\n # vol = sess.create_volume(inst.get_compartment_id(),\n vol = sess.create_volume(sess.this_compartment().get_ocid(),\n inst.get_availability_domain_name(),\n size=size,\n display_name=display_name,\n wait=True)\n except Exception as e:\n _logger.debug(\"Failed to create volume\", exc_info=True)\n raise Exception(\"Failed to create volume\") from e\n\n _logger.info(\"Volume [%s] created\", vol.get_display_name())\n\n if not attach_it:\n return\n\n compat_info_message(gen_msg=\"Attaching the volume to this instance\", mode=mode)\n try:\n if chap_credentials:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=True)\n else:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=False)\n except Exception as e:\n _logger.debug('Cannot attach BV', exc_info=True)\n vol.destroy()\n raise Exception('Cannot attach BV') from e\n #\n # attach using iscsiadm commands\n compat_info_message(gen_msg=\"Attaching iSCSI device.\", mode=mode)\n\n vol_portal_ip = vol.get_portal_ip()\n vol_portal_port = vol.get_portal_port()\n vol_iqn = vol.get_iqn()\n vol_username = vol.get_user()\n vol_password = vol.get_password()\n retval = iscsiadm.attach(ipaddr=vol_portal_ip,\n port=vol_portal_port,\n iqn=vol_iqn,\n username=vol_username,\n password=vol_password,\n auto_startup=True)\n compat_info_message(compat_msg=\"iscsiadm attach Result: %s\" % iscsiadm.error_message_from_code(retval),\n gen_msg=\"Volume [%s] is attached.\" % vol.get_display_name(), mode=mode)\n if retval == 0:\n _logger.debug('Creation successful')\n if chap_credentials:\n _logger.debug('Attachment OK: saving chap credentials.')\n add_chap_secret(vol_iqn, vol_username, vol_password)\n return\n\n # here because of error case\n try:\n _logger.debug('Destroying the volume')\n vol.destroy()\n except Exception as e:\n _logger.debug(\"Failed to destroy volume\", exc_info=True)\n _logger.error(\"Failed to destroy volume: %s\", str(e))\n\n raise Exception('Failed to attach created volume: %s' % iscsiadm.error_message_from_code(retval))",
"def __start_minions(self):\n self.cluster.run(args=[\n 'sudo', 'systemctl', 'restart', 'salt-minion.service'])",
"def start(self) -> None:\n logger.info(\"node(%s) service(%s) starting...\", self.node.name, self.name)\n self.create_shadow_dirs()\n self.create_dirs()\n self.create_files()\n wait = self.validation_mode == ConfigServiceMode.BLOCKING\n self.run_startup(wait)\n if not wait:\n if self.validation_mode == ConfigServiceMode.TIMER:\n self.wait_validation()\n else:\n self.run_validation()",
"def start(\n release,\n openstack_vip,\n sql_pass,\n sql_ip,\n rabbit_ips_list,\n rabbit_pass,\n ceph,\n kolla_ansible_dir,\n cloud_name,\n):\n click.echo(\"starting arcus manager\")\n enable_ceph = \"true\" if ceph else \"false\"\n arcus_mgr.start(\n release,\n openstack_vip,\n sql_pass,\n sql_ip,\n rabbit_ips_list,\n rabbit_pass,\n enable_ceph,\n kolla_ansible_dir,\n cloud_name,\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stops the gluster volume | def volume_stop(mnode, volname, force=False):
return RestClient(mnode).handle_request(
"POST", "/v1/volumes/%s/stop" % volname,
httplib.OK, None) | [
"def test_volume_stop(self):\n pass",
"def stop_vm():\n send_vm_command(VM_STOP)",
"def volume_scrub_stop(self, name=None):\n return self.request( \"volume-scrub-stop\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )",
"def stop():\n # report operation\n llecho('Stopping all LVM volume groups')\n\n # make 10 tries to stop all volume groups\n status = 0\n\n for i in range(0, 10):\n\n # run the command to stop volume groups\n status = run(CMD_STOP_LVM)\n\n # stopped successfully: quit\n if status == 0:\n break\n\n # wait 1 second before another try\n time.sleep(1)\n\n # cannot stop volume groups: fail\n if status != 0:\n llecho('Error: cannot deactivate LVM volume groups')\n raise ZKVMError('PARTITIONER', 'LVM', 'DEACTIVATE_VG')",
"def stop_glusterd(servers):\n if isinstance(servers, str):\n servers = [servers]\n\n cmd = \"service glusterd stop\"\n results = g.run_parallel(servers, cmd)\n\n _rc = True\n for server, ret_values in results.iteritems():\n retcode, _, _ = ret_values\n if retcode != 0:\n g.log.error(\"Unable to stop glusterd on server %s\", server)\n _rc = False\n if not _rc:\n return False\n\n return True",
"def stop(self):\n \n # try to stop minikube\n try:\n\n # stop minikube\n command = str('minikube stop')\n subprocess.call(command.split(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n\n # update current_status\n self.current_status = 'stopped'\n\n # return error if it doesn't work\n except:\n\n # update current_status\n self.current_status = 'not responding'\n\n # raise error\n raise Exception('I could not stop minikube')",
"def stop(self):\n if self.send('/stop', 'post') is None:\n self.delete()",
"def stop(status=\"\"):\n raise StopScript(status)",
"def stop(self):\n c = Controller()\n instance_id = c.instance.id\n c.terminate_instance()\n\n print('Successfully shut down instance: ' + instance_id)",
"def _stop_node(self, node_id):\n self._do_blocking_operation(\n self._compute.instances().stop,\n timeout_sec=5*60,\n instance=node_id\n )",
"def detach_volume(self, node, volume):\r\n url = REST_BASE + '/instances/%s' % (node.id)\r\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\r\n data = {'storageID': volume.id, 'type': 'detach'}\r\n resp = self.connection.request(action=url,\r\n method='PUT',\r\n headers=headers,\r\n data=data)\r\n return int(resp.status) == 200",
"def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def __stopRestoreInstance(self, dbInst):\n pgCmd = \"gs_ctl stop -Z restoremode -D %s\" % dbInst.datadir \n self.logger.debug(\"stop local instance in restore mode cmd is %s\" % pgCmd)\n (status, output) = commands.getstatusoutput(pgCmd)\n if (status != 0):\n self.logger.debug(\"Stop instance failed!Output: %s\" % output)",
"def stop():\n local('aws ec2 stop-instances --instance-ids %s'%(AWS_INSTANCE_ID))",
"def shutdown_lvm(device):\n device = block.sys_block_path(device)\n # lvm devices have a dm directory that containes a file 'name' containing\n # '{volume group}-{logical volume}'. The volume can be freed using lvremove\n name_file = os.path.join(device, 'dm', 'name')\n (vg_name, lv_name) = lvm.split_lvm_name(util.load_file(name_file))\n # use two --force flags here in case the volume group that this lv is\n # attached two has been damaged\n LOG.debug('running lvremove on %s/%s', vg_name, lv_name)\n util.subp(['lvremove', '--force', '--force',\n '{}/{}'.format(vg_name, lv_name)], rcs=[0, 5])\n # if that was the last lvol in the volgroup, get rid of volgroup\n if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:\n util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])\n # refresh lvmetad\n lvm.lvm_scan()",
"def detach(self, volume):\r\n return volume.detach()",
"def stop(self):\n if self.is_running:\n print('Stopping container...')\n cli.stop(container=self.container_id)\n self.is_running = False",
"def stop(self):\n self.logger.debug('Server - td-agent-bit - stop call.')\n self.change_service_status(\"stop\")",
"def _stop(self):\n return self.client.stop_notebook_instance(NotebookInstanceName=self.notebook_instance_name)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes the gluster volume if given volume exists in gluster and deletes the directories in the bricks associated with the given volume | def volume_delete(mnode, volname, xfail=False):
hosts = []
paths = []
volinfo = get_volume_info(mnode, volname, xfail)
if not volinfo:
if xfail:
g.log.info(
"Volume {} does not exist in {}"
.format(volname, mnode)
)
return True
else:
g.log.error(
"Unexpected: volume {} does not exist in {}"
.format(volname, mnode))
return False
_, _, err = RestClient(mnode).handle_request(
"DELETE", "/v1/volumes/%s" % volname,
httplib.NO_CONTENT, None)
if err:
if xfail:
g.log.info("Volume delete is expected to fail")
return True
g.log.error("Volume delete failed")
return False
# remove all brick directories
for j in volinfo['subvols']:
for i in j['bricks']:
g.run(i['host'], "rm -rf %s" % i['path'])
return True | [
"def delete_volume(client, volume_id):\n if supports_volumes_api(client):\n client.remove_volume(volume_id)\n return\n\n def clear_path(path):\n if exists(path):\n logging.info(\"Removing path: %s\", path)\n rmtree(path)\n clear_path(os.path.join(DOCKER_VFS_DIR, volume_id))\n clear_path(os.path.join(DOCKER_VOLUMES_DIR, volume_id))",
"def test_delete_volume(self):\n self._driver.create_volume(self.TEST_VOLUME)\n self._driver.delete_volume(self.TEST_VOLUME)\n self.assertFalse(os.path.isfile(self.TEST_VOLPATH))",
"def _delete_volume_for_cleanup(volumes_client, volume_id):\n try:\n vol = volumes_client.show_volume(volume_id)['volume']\n if vol['status'] == 'in-use':\n waiters.wait_for_volume_resource_status(volumes_client,\n volume_id,\n 'available')\n except lib_exc.NotFound:\n pass\n BaseVolumeTest.delete_volume(volumes_client, volume_id)",
"def delete_volume(client, volume_id):\n client.delete_volume(volume_id)\n client.wait_for_resource_deletion(volume_id)",
"def destroy_volume(self, volume):\r\n url = REST_BASE + '/storage/%s' % (volume.id)\r\n status = int(self.connection.request(action=url,\r\n method='DELETE').status)\r\n return status == httplib.OK",
"def block_delete(mnode, volname, blockname, unlink_storage=\"yes\", force=False):\n force_delete_block = ''\n if force is True:\n force_delete_block = \"force\"\n\n if unlink_storage == \"yes\":\n cmd = \"gluster-block delete %s/%s %s\" % (volname, blockname,\n force_delete_block)\n else:\n cmd = (\"gluster-block delete %s/%s unlink-storage no %s\"\n % (volname, blockname, force_delete_block))\n return g.run(mnode, cmd)",
"def remove_fs(self, client, vol_name, validate=True, **kwargs):\n rmvolume_cmd = f\"ceph fs volume rm {vol_name} --yes-i-really-mean-it\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=rmvolume_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n out, rc = client.exec_command(sudo=True, cmd=\"ceph fs ls --format json\")\n volname_ls = json.loads(out.read().decode())\n if vol_name in [i[\"name\"] for i in volname_ls]:\n raise CommandFailed(f\"Creation of filesystem: {vol_name} failed\")\n return cmd_out, cmd_rc",
"def _cleanup_migrate_disk(self, context, instance, volume):\n try:\n self._volume_api.delete(context, volume['id'])\n except Exception as err:\n LOG.exception(_(\"Unable to cleanup the resized volume: %s\" % err))",
"def delete(self, force=False):\r\n if force:\r\n self.detach()\r\n self.delete_all_snapshots()\r\n try:\r\n super(CloudBlockStorageVolume, self).delete()\r\n except exc.VolumeNotAvailable:\r\n # Notify the user? Record it somewhere?\r\n # For now, just re-raise\r\n raise",
"def remove_brick(mnode, volname, bricks_list, option, xml=False, **kwargs):\n if option == \"commit\" or option == \"force\":\n option = option + \" --mode=script\"\n\n replica_count = None\n replica = ''\n\n if 'replica_count' in kwargs:\n replica_count = int(kwargs['replica_count'])\n\n if replica_count is not None:\n replica = \"replica %d\" % replica_count\n\n xml_str = ''\n if xml:\n xml_str = \"--xml\"\n log_level = 'DEBUG'\n else:\n log_level = 'INFO'\n\n cmd = (\"gluster volume remove-brick %s %s %s %s %s\" %\n (volname, replica, ' '.join(bricks_list), option, xml_str))\n\n return g.run(mnode, cmd, log_level=log_level)",
"def delete_volumes(self):\n return SystemCommand(self.cmd.delete_volumes)",
"def __remove_volumes_for_tenant(self, tenant_id):\n try:\n cur = self.conn.execute(\n \"SELECT name FROM tenants WHERE id = ?\",\n (tenant_id,)\n )\n result = cur.fetchone()\n except sqlite3.Error as e:\n logging.error(\"Error %s when querying from tenants table\", e)\n return str(e)\n\n error_msg = \"\"\n if result:\n logging.debug(\"remove_volumes_for_tenant: %s %s\", tenant_id, result)\n tenant_name = result[0]\n vmdks = vmdk_utils.get_volumes(tenant_name)\n # Delete all volumes for this tenant.\n dir_paths = set()\n for vmdk in vmdks:\n vmdk_path = os.path.join(vmdk['path'], \"{0}\".format(vmdk['filename']))\n dir_paths.add(vmdk['path'])\n logging.debug(\"path=%s filename=%s\", vmdk['path'], vmdk['filename'])\n logging.debug(\"Deleting volume path%s\", vmdk_path)\n datastore_url = vmdk_utils.get_datastore_url(vmdk['datastore'])\n err = vmdk_ops.removeVMDK(vmdk_path=vmdk_path,\n vol_name=vmdk_utils.strip_vmdk_extension(vmdk['filename']),\n vm_name=None,\n tenant_uuid=tenant_id,\n datastore_url=datastore_url)\n if err:\n logging.error(\"remove vmdk %s failed with error %s\", vmdk_path, err)\n error_msg += str(err)\n\n # remove symlink\n err = self.remove_symlink_for_tenant(tenant_id)\n if err:\n logging.error(\"remove symlink for tenant %s return with error %s\", tenant_name, err)\n error_msg += str(err)\n\n # Delete path /vmfs/volumes/datastore_name/tenant_uuid\n logging.debug(\"Deleting dir paths %s\", dir_paths)\n for path in list(dir_paths):\n try:\n os.rmdir(path)\n except os.error as e:\n msg = \"remove dir {0} failed with error {1}\".format(path, e)\n logging.error(msg)\n error_msg += str(err)\n\n err = self.remove_volumes_from_volumes_table(tenant_id)\n if err:\n logging.error(\"Failed to remove volumes from database %s\", err)\n error_msg += str(err)\n\n if error_msg:\n return error_msg\n\n return None",
"def delete_volumes(client, volumes):\n failed_volumes = []\n for volume in volumes:\n try:\n client.delete_volume(VolumeId=volume)\n except ClientError as error:\n code = error.response['Error']['Code']\n if code == 'VolumeInUse':\n client.detach_volume(\n VolumeId=volume,\n Force=True)\n waiter = client.get_waiter('volume_available')\n waiter.wait(VolumeIds=[volume])\n client.delete_volume(VolumeId=volume)\n continue\n failed_volumes.append(volume)\n return failed_volumes",
"def clean():\n global madeVolume\n if madeVolume:\n ret = subprocess.run(\n [\"docker\", \"volume\", \"rm\", c.TMP_VOL],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if ret.returncode == 0:\n steprint(f\"Removed volume: {c.TMP_VOL}\")\n else:\n steprint(\n f\"Could not delete temporary docker volume: {ret.returncode}\\n\"\n f\"You can try: docker volume rm {c.TMP_VOL}\"\n )",
"def heketi_blockvolume_delete(heketi_client_node, heketi_server_url,\n block_volume_id, raise_on_error=True, **kwargs):\n\n heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(\n heketi_server_url, **kwargs)\n\n cmd = \"heketi-cli -s %s blockvolume delete %s %s %s %s\" % (\n heketi_server_url, block_volume_id, json_arg, admin_key, user)\n cmd = TIMEOUT_PREFIX + cmd\n out = heketi_cmd_run(\n heketi_client_node, cmd, raise_on_error=raise_on_error)\n return out",
"def detach_and_delete_vols(self, volumes):\n for v in volumes:\n if v.status == \"in-use\":\n v.detach()\n v.get()\n sample = TimeoutSampler(\n 100,\n 5,\n self.check_expected_vol_status,\n vol=v,\n expected_state=\"available\",\n )\n if not sample.wait_for_func_status(True):\n logger.error(f\"Volume {v.name} failed to detach\")\n raise exceptions.PSIVolumeNotInExpectedState()\n\n v.delete()\n sample = TimeoutSampler(100, 5, self.check_vol_deleted, vol=v)\n if not sample.wait_for_func_status(True):\n logger.error(f\"Failed to delete Volume {v.name}\")\n raise exceptions.PSIVolumeDeletionFailed()",
"def delete(self, volume_id):\n return self._snap_operation(2, volume_id)",
"def test_volumes_remove(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.volumes), 8)\n svc.volumes = filter(lambda r: r.resourcePath not in [\"zenjobs\", \"zenoss-export\"], svc.volumes)\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.volumes), 6)\n for v in svc.volumes:\n if v.resourcePath in [\"zenjobs\", \"zenoss-export\"]:\n raise ValueError(\"Error removing volume.\")",
"def detach_volume(self, node, volume):\r\n url = REST_BASE + '/instances/%s' % (node.id)\r\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\r\n data = {'storageID': volume.id, 'type': 'detach'}\r\n resp = self.connection.request(action=url,\r\n method='PUT',\r\n headers=headers,\r\n data=data)\r\n return int(resp.status) == 200"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resets the gluster volume | def volume_reset(mnode, volname, force=False,
options=None, all_volumes=False):
    if not options:
options = {}
data = {
"options": options,
"force": force,
"all": all_volumes,
}
return RestClient(mnode).handle_request(
"DELETE", "/v1/volumes/%s/options" % volname,
httplib.OK, data) | [
"def reset(to_unblock):\n log_everywhere(resource.nodes, 'Resetting cluster state')\n always_blocked = connections(from_nodes=[A, B, C], to_node=B)\n all_blocked = always_blocked.extend(to_unblock)\n all_blocked.unblock()\n all_blocked.event(r'connection .* connection:Connected')\n\n resource.down()\n resource.nodes.remove(D)\n resource.nodes.get_diskful().drbdadm(['forget-peer', '{}:{}'.format(resource.name, D.name)])\n\n if D.volumes[0].disk:\n D.volumes[0].create_md(max_peers=3)\n\n resource.touch_config()\n resource.nodes.adjust()\n resource.nodes.event(r'quorum:yes')",
"def test_fully_reset_cluster(self):\n self.reset_cluster()",
"def reset(self):\n self.fuse_region = []",
"def reset_cluster_after_upgrade_neutron_ceph(self):\n self.env.revert_snapshot('upgrade_master_neutron_ceph')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[6:7])\n\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-05': ['compute', 'ceph-osd'],\n 'slave-06': ['compute', 'ceph-osd']\n }, False, True\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-07': ['controller']}\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['ha', 'sanity', 'smoke'],\n should_fail=1)",
"def reset(self):\n self.fuse = []",
"def reset(self):\n LOG.info('Reset nodes: %s', self)\n self.power_management.reset(self.get_macs())",
"def resetHealth(self):\n\t\tself.health = 10",
"def reset_stock(self) -> None:\n StockManager.get_instance().stock.clear()",
"def reset_health(self):\n self.health = 100",
"def reset(self):\n self.state = self.env.reset()",
"def reset(self):\r\n Neuron.reset(self)\r\n self.voltage.set_value(numpy.zeros(self.size).astype('float32'))\r\n self.refractory_time.set_value(numpy.zeros(self.size).astype('float32'))",
"def notclustered(self):\n\n LinuxVolumeManager.has_lvm()\n LinuxVolumeManager.lvm('vgchange', '-cn', str(self.getAttribute(\"name\")))",
"def request_reset(self, req):\n # First get the docker container ID\n with open(\"/proc/self/cgroup\", \"r\") as f:\n line = f.readline()\n idx = line.split(\"/\")[-1].strip()\n req.reply(\"ok\",)\n os.system(\"docker restart {}\".format(idx))",
"def hard_reset(self):\n if self.repository.reset_head():\n self.index.reset(self.repository.head)\n self.working_directory.reset(self.index)",
"def reset(self):\n self.current_shard = None\n self.current_shard_n = None\n self.current_offset = None",
"def reset(ctx,\n # Mandatory main parameter\n drives,\n # Mandatory main parameter\n force):\n \"\"\"in an existing node or used in an upgraded node. This method requires the force parameter to be included in the method call.\"\"\"\n\n \n\n cli_utils.establish_connection(ctx)\n \n \n \n\n \n\n ctx.logger.info(\"\"\": \"\"\"\"\"\"drives = \"\"\" + str(drives)+\";\"+\"\"\"force = \"\"\" + str(force)+\"\"\";\"\"\"+\"\")\n try:\n _ResetDrivesResult = ctx.element.reset_drives(drives=drives, force=force)\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n if ctx.json:\n print(simplejson.dumps(simplejson.loads(_ResetDrivesResult), indent=4))\n return\n else:\n cli_utils.print_result(_ResetDrivesResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)",
"def reset_master(server):\n server.exec_stmt(\"RESET MASTER\")",
"def reset(self):\n ret = libvirtmod.virNodeDeviceReset(self._o)\n if ret == -1: raise libvirtError ('virNodeDeviceReset() failed')\n return ret",
"def reset_cluster_after_upgrade_nova_cinder(self):\n self.env.revert_snapshot('upgrade_master_nova_cinder')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:6])\n\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-04': ['controller'],\n 'slave-05': ['controller'],\n 'slave-06': ['compute']\n }\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id)\n\n nailgun_controllers = self.fuel_web.\\\n get_nailgun_cluster_nodes_by_roles(cluster_id=cluster_id,\n roles=['controller'])\n devops_controllers = self.fuel_web.\\\n get_devops_nodes_by_nailgun_nodes(nailgun_controllers)\n\n primary_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n primary_controller.destroy()\n\n wait(lambda: not self.fuel_web.\n get_nailgun_node_by_devops_node(primary_controller)['online'],\n timeout=60 * 10)\n\n # Wait for HA services ready\n self.fuel_web.assert_ha_services_ready(cluster_id)\n\n # Wait until OpenStack services are UP\n self.fuel_web.assert_os_services_ready(cluster_id, should_fail=1)\n\n logger.info(\"Waiting 300 sec before MySQL Galera will up, \"\n \"then run OSTF\")\n\n # Wait until MySQL Galera is UP on online controllers\n self.fuel_web.wait_mysql_galera_is_up(\n [n.name for n in\n set(devops_controllers) - {primary_controller}], timeout=300)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'],\n should_fail=1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get gluster volume info | def volume_info(mnode, volname):
return RestClient(mnode).handle_request("GET",
"/v1/volumes/%s" % volname,
httplib.OK, None) | [
"def get_gluster_on_compute_volume():\n # type: (None) -> str\n return _GLUSTER_ON_COMPUTE_VOLUME",
"def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list",
"def show_volume(svm_name) -> None:\n print()\n print(\"Getting Volume Details\")\n print(\"===================\")\n try:\n for volume in Volume.get_collection(\n **{\"svm.name\": svm_name}, fields=\"uuid\"):\n print(\n \"Volume name:-%s ; Volume uuid:-%s \" %\n (volume.name, volume.uuid))\n except NetAppRestError as error:\n print(\"Error:- \" % error.http_err_response.http_response.text)\n print(\"Exception caught :\" + str(error))",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )",
"def info(self):\n ret = libvirtmod.virStorageVolGetInfo(self._o)\n if ret is None: raise libvirtError ('virStorageVolGetInfo() failed', vol=self)\n return ret",
"def get_file_server_glusterfs_volume_name(sc):\n # type: (StorageClusterSettings) -> str\n try:\n volname = sc.file_server.server_options['glusterfs']['volume_name']\n except KeyError:\n volname = get_gluster_default_volume_name()\n return volname",
"def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )",
"def volume_space_list_info(self, volume=None):\n return self.request( \"volume-space-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-space-infos': [ VolSpaceInfo, True ],\n } )",
"def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))",
"def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)",
"def volume_list(mnode):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes\", httplib.OK, None)",
"def get_file_server_glusterfs_volume_type(sc):\n # type: (StorageClusterSettings) -> str\n try:\n voltype = sc.file_server.server_options[\n 'glusterfs']['volume_type'].lower()\n except KeyError:\n voltype = 'distributed'\n return voltype",
"def _get_volume_name(self):\n pass",
"def _get_volume_name(self):\n return self._heat_resource.properties[\"volume_id\"]",
"def heketi_volume_info(heketi_client_node, heketi_server_url, volume_id,\n raise_on_error=True, **kwargs):\n\n heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(\n heketi_server_url, **kwargs)\n\n cmd = \"heketi-cli -s %s volume info %s %s %s %s\" % (\n heketi_server_url, volume_id, json_arg, admin_key, user)\n cmd = TIMEOUT_PREFIX + cmd\n out = heketi_cmd_run(\n heketi_client_node, cmd, raise_on_error=raise_on_error)\n if json_arg and out:\n return json.loads(out)\n return out",
"def volume_get_filer_info(self):\n return self.request( \"volume-get-filer-info\", {\n }, {\n 'disk-types': [ basestring, False ],\n 'default-raidtype': [ basestring, False ],\n 'checksum-types': [ basestring, False ],\n 'root-volume': [ basestring, False ],\n 'raidgroup-size': [ RaidgroupSizeInfo, True ],\n 'allowed-raidtypes': [ RaidtypeInfo, True ],\n 'snapshots-max': [ int, False ],\n } )",
"def volume_wafl_info(self):\n return self.request( \"volume-wafl-info\", {\n }, {\n 'root-volume': [ basestring, False ],\n 'disk-types': [ basestring, False ],\n 'snapshots-max': [ int, False ],\n 'checksum-types': [ basestring, False ],\n } )",
"def get_volume_info(disk_snapshot_id):\n output = subprocess.check_output([\n 'qemu-img',\n 'info',\n '--output=json',\n disk_snapshot_id,\n ])\n return json.loads(str(output))",
"def volume_get_root_name(self):\n return self.request( \"volume-get-root-name\", {\n }, {\n 'volume': [ basestring, False ],\n } )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get gluster volume status | def volume_status(mnode, volname):
return RestClient(mnode).handle_request(
"GET", "/v1/volumes/%s/status" % volname,
httplib.OK, None) | [
"def volume_status(self, volume):\r\n volume = self._get_volume(volume)\r\n raid = self._get_raid(volume[\"devicefile\"])\r\n if volume is not None and raid is not None:\r\n return raid[\"state\"]",
"def get_status(self):\n result = None\n try:\n r = requests.get(self.url_status)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result",
"def get_status(self):\n r = requests.get(self.url_status)\n try:\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result",
"def volume_brick_status(mnode, volname):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/bricks\" % volname,\n httplib.OK, None)",
"def describe_volume_status(DryRun=None, VolumeIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass",
"def get_gluster_on_compute_volume():\n # type: (None) -> str\n return _GLUSTER_ON_COMPUTE_VOLUME",
"def volume_info(mnode, volname):\n return RestClient(mnode).handle_request(\"GET\",\n \"/v1/volumes/%s\" % volname,\n httplib.OK, None)",
"def disk_smart_status(self, disk):\r\n disk = self._get_disk(disk)\r\n if disk is not None:\r\n return disk[\"overallstatus\"]",
"def get_volume_stats(self):\n self.conf.update_config_value()\n self._update_volume_stats()",
"def cluster_status():\n cluster_json = H2OConnection.get_json(\"Cloud?skip_ticks=true\")\n\n print(\"Version: {0}\".format(cluster_json['version']))\n print(\"Cloud name: {0}\".format(cluster_json['cloud_name']))\n print(\"Cloud size: {0}\".format(cluster_json['cloud_size']))\n if cluster_json['locked']: print(\"Cloud is locked\\n\")\n else: print(\"Accepting new members\\n\")\n if cluster_json['nodes'] == None or len(cluster_json['nodes']) == 0:\n print(\"No nodes found\")\n return\n\n status = []\n for node in cluster_json['nodes']:\n for k, v in zip(node.keys(),node.values()):\n if k in [\"h2o\", \"healthy\", \"last_ping\", \"num_cpus\", \"sys_load\", \n \"mem_value_size\", \"free_mem\", \"pojo_mem\", \"swap_mem\",\n \"free_disk\", \"max_disk\", \"pid\", \"num_keys\", \"tcps_active\",\n \"open_fds\", \"rpcs_active\"]: status.append(k+\": {0}\".format(v))\n print(', '.join(status))\n print()",
"def health(self) -> 'outputs.AmlFilesystemHealthResponse':\n return pulumi.get(self, \"health\")",
"def info(self):\n ret = libvirtmod.virStorageVolGetInfo(self._o)\n if ret is None: raise libvirtError ('virStorageVolGetInfo() failed', vol=self)\n return ret",
"def get_status(self) -> NodeManagerStatus:",
"def get_volume_stats(self, refresh=False): \n if refresh:\n self._stats = self._update_volume_stats()\n\n return self._stats",
"def get_volume_stats(self, refresh=False):\n if self._volume_stats is None or refresh:\n self._update_volume_stats()\n\n LOG.info(\n 'Successfully update volume stats. '\n 'backend: %(volume_backend_name)s, '\n 'vendor: %(vendor_name)s, '\n 'model_type: %(model_type)s, '\n 'system_id: %(system_id)s, '\n 'status: %(status)s, '\n 'driver_version: %(driver_version)s, '\n 'storage_protocol: %(storage_protocol)s.', self._volume_stats)\n\n return self._volume_stats",
"def status(self):\n\n # try to call status\n try:\n\n # check minikube status\n command = str('minikube status')\n subprocess.call(command.split())\n\n # except\n except:\n\n # print message\n print ('Minikube cluster is not responding')",
"def volume_clone_split_status(self, volume=None):\n return self.request( \"volume-clone-split-status\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-details': [ CloneSplitDetailInfo, True ],\n } )",
"def resource_status(self) -> pulumi.Output['outputs.DiskResourceStatusResponse']:\n return pulumi.get(self, \"resource_status\")",
"def get_status():\n \n return db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get gluster volume brick status | def volume_brick_status(mnode, volname):
return RestClient(mnode).handle_request(
"GET", "/v1/volumes/%s/bricks" % volname,
httplib.OK, None) | [
"def volume_status(mnode, volname):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/status\" % volname,\n httplib.OK, None)",
"def get_bricks(volname):\n value = []\n cmd = [\"gluster\", \"volume\", \"info\", volname, \"--xml\"]\n info = execute(cmd)\n try:\n tree = etree.fromstring(info)\n volume_el = tree.find('volInfo/volumes/volume')\n for b in volume_el.findall('bricks/brick'):\n value.append(b.text)\n except ParseError:\n raise GlusterBadXmlFormat(\"Bad XML Format: %s\" % \" \".join(cmd))\n\n return value",
"def cmd(self):\n return '3dBrickStat'",
"def get_status(self):\n result = None\n try:\n r = requests.get(self.url_status)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result",
"def get_status(self):\n r = requests.get(self.url_status)\n try:\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result",
"def volume_status(self, volume):\r\n volume = self._get_volume(volume)\r\n raid = self._get_raid(volume[\"devicefile\"])\r\n if volume is not None and raid is not None:\r\n return raid[\"state\"]",
"def cluster_status():\n cluster_json = H2OConnection.get_json(\"Cloud?skip_ticks=true\")\n\n print(\"Version: {0}\".format(cluster_json['version']))\n print(\"Cloud name: {0}\".format(cluster_json['cloud_name']))\n print(\"Cloud size: {0}\".format(cluster_json['cloud_size']))\n if cluster_json['locked']: print(\"Cloud is locked\\n\")\n else: print(\"Accepting new members\\n\")\n if cluster_json['nodes'] == None or len(cluster_json['nodes']) == 0:\n print(\"No nodes found\")\n return\n\n status = []\n for node in cluster_json['nodes']:\n for k, v in zip(node.keys(),node.values()):\n if k in [\"h2o\", \"healthy\", \"last_ping\", \"num_cpus\", \"sys_load\", \n \"mem_value_size\", \"free_mem\", \"pojo_mem\", \"swap_mem\",\n \"free_disk\", \"max_disk\", \"pid\", \"num_keys\", \"tcps_active\",\n \"open_fds\", \"rpcs_active\"]: status.append(k+\": {0}\".format(v))\n print(', '.join(status))\n print()",
"def volume_clone_split_status(self, volume=None):\n return self.request( \"volume-clone-split-status\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-details': [ CloneSplitDetailInfo, True ],\n } )",
"def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)",
"def start():\n\n brick_device = os.environ.get(\"BRICK_DEVICE\", None)\n brick_path = os.environ[\"BRICK_PATH\"]\n if brick_device is not None and brick_device != \"\":\n brickfs = os.environ.get(\"BRICK_FS\", \"xfs\")\n create_and_mount_brick(brick_device, brick_path, brickfs)\n\n volume_id = os.environ[\"VOLUME_ID\"]\n brick_path_name = brick_path.strip(\"/\").replace(\"/\", \"-\")\n volname = os.environ[\"VOLUME\"]\n nodename = os.environ[\"HOSTNAME\"]\n\n create_brickdir(brick_path)\n verify_brickdir_xattr_support(brick_path)\n set_volume_id_xattr(brick_path, volume_id)\n\n volfile_id = \"%s.%s.%s\" % (volname, nodename, brick_path_name)\n volfile_path = os.path.join(VOLFILES_DIR, \"%s.vol\" % volfile_id)\n generate_brick_volfile(volfile_path, volname)\n\n # UID is stored at the time of installation in configmap.\n uid = None\n with open(os.path.join(VOLINFO_DIR, \"uid\")) as uid_file:\n uid = uid_file.read()\n\n # Send Analytics Tracker\n # The information from this analytics is available for\n # developers to understand and build project in a better way\n send_analytics_tracker(\"server\", uid)\n\n test_counter = Gauge(\"test\", \"Test Counter\")\n\n os.execv(\n \"/usr/sbin/glusterfsd\",\n [\n \"/usr/sbin/glusterfsd\",\n \"-N\",\n \"--volfile-id\", volfile_id,\n \"-p\", \"/var/run/gluster/glusterfsd-%s.pid\" % brick_path_name,\n \"-S\", \"/var/run/gluster/brick.socket\",\n \"--brick-name\", brick_path,\n \"-l\", \"-\", # Log to stderr\n \"--xlator-option\",\n \"*-posix.glusterd-uuid=%s\" % os.environ[\"NODEID\"],\n \"--process-name\", \"brick\",\n \"--brick-port\", \"24007\",\n \"--xlator-option\",\n \"%s-server.listen-port=24007\" % volname,\n \"-f\", volfile_path\n ]\n )",
"def status(self):\n\n # try to call status\n try:\n\n # check minikube status\n command = str('minikube status')\n subprocess.call(command.split())\n\n # except\n except:\n\n # print message\n print ('Minikube cluster is not responding')",
"def disk_smart_status(self, disk):\r\n disk = self._get_disk(disk)\r\n if disk is not None:\r\n return disk[\"overallstatus\"]",
"def get_status(self) -> NodeManagerStatus:",
"def smb_service_status(mnode):\n g.log.info(\"Getting SMB Service status on %s\", mnode)\n return g.run(mnode, \"service smb status\")",
"def load_balance_status(self):\n return self._load_balance_status",
"def get_brightness(self):\n return int(requests.get(url+'/groups/{}'.format(self.group_number), verify=False).json()['action']['bri'])",
"def get_status (self):\n return self.__status",
"def get_gluster_on_compute_volume():\n # type: (None) -> str\n return _GLUSTER_ON_COMPUTE_VOLUME",
"def describe_volume_status(DryRun=None, VolumeIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List the gluster volume | def volume_list(mnode):
return RestClient(mnode).handle_request(
"GET", "/v1/volumes", httplib.OK, None) | [
"def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )",
"def list_volumes( fields ):\n global conf\n\n volume_names = VOLUME_NAMES( conf )\n ret = []\n \n for name in volume_names:\n vol_conf = read_volume( name, fields )\n vol_conf['NAME'] = name\n ret.append( vol_conf )\n\n return ret",
"def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))",
"def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list",
"def heketi_volume_list(\n heketi_client_node, heketi_server_url, raise_on_error=True, **kwargs):\n\n heketi_server_url, json_arg, admin_key, user = _set_heketi_global_flags(\n heketi_server_url, **kwargs)\n\n cmd = \"heketi-cli -s %s volume list %s %s %s\" % (\n heketi_server_url, json_arg, admin_key, user)\n cmd = TIMEOUT_PREFIX + cmd\n out = heketi_cmd_run(\n heketi_client_node, cmd, raise_on_error=raise_on_error)\n if json_arg and out:\n return json.loads(out)\n return out",
"def vol_list(server, virt=\"KVM\", pool_name=None):\n\n cmd = 'virsh -c %s vol-list %s 2>/dev/null | sed -e \"1,2 d\" -e \"$ d\"' \\\n % (virt2uri(virt), pool_name)\n ret, out = utils.run_remote(server, cmd)\n if ret != 0:\n return None\n\n return out",
"def disk_list(ip, vs_name):\n\n guest_cmd = 'cat /proc/partitions | awk \"/^ /{ print $4 } \" '\n rc, out = run_remote_guest(ip, vs_name, guest_cmd)\n\n if rc != 0:\n return None\n\n return out",
"def list_disks(self):\n return self.default_pool.listVolumes()",
"def do_showVolumes(self, filer):\n\t\tcommand = 'ssh -qn admin@%s vol show' % self.filer\n\t\tproc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n\t\tp_stdout = proc.communicate()[0]\n\t\tprint p_stdout",
"def ListVolumes(self) -> Dict[str, 'ebs.AWSVolume']:\n\n return self.aws_account.ListVolumes(\n filters=[{\n 'Name': 'attachment.instance-id',\n 'Values': [self.instance_id]}])",
"def list(connection):\n volumes = get_watched_volumes(connection)\n\n if not volumes:\n logger.info('No watched volumes found')\n return\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n logger.info(\n '| {volume:<21} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume='Volume ID',\n volume_name='Volume name',\n interval='Interval',\n retention='Retention'))\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n\n for volume in volumes:\n if 'AutomatedEBSSnapshots' not in volume.tags:\n interval = 'Interval tag not found'\n elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS:\n interval = 'Invalid interval'\n else:\n interval = volume.tags['AutomatedEBSSnapshots']\n\n if 'AutomatedEBSSnapshotsRetention' not in volume.tags:\n retention = 0\n else:\n retention = volume.tags['AutomatedEBSSnapshotsRetention']\n\n # Get the volume name\n try:\n volume_name = volume.tags['Name']\n except KeyError:\n volume_name = ''\n\n logger.info(\n '| {volume_id:<14} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume_id=volume.id,\n volume_name=volume_name,\n interval=interval,\n retention=retention))\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')",
"def show_volume(svm_name) -> None:\n print()\n print(\"Getting Volume Details\")\n print(\"===================\")\n try:\n for volume in Volume.get_collection(\n **{\"svm.name\": svm_name}, fields=\"uuid\"):\n print(\n \"Volume name:-%s ; Volume uuid:-%s \" %\n (volume.name, volume.uuid))\n except NetAppRestError as error:\n print(\"Error:- \" % error.http_err_response.http_response.text)\n print(\"Exception caught :\" + str(error))",
"def heketi_volume_list_by_name_prefix(\n heketi_client_node, heketi_server_url, prefix, **kwargs):\n # Delete json key from kwargs\n kwargs.pop(\"json\", None)\n\n h_volumes = heketi_volume_list(\n heketi_client_node, heketi_server_url, **kwargs)\n\n vol_regex = re.compile(HEKETI_VOLUME % prefix)\n return vol_regex.findall(h_volumes.strip())",
"def get_gluster_on_compute_volume():\n # type: (None) -> str\n return _GLUSTER_ON_COMPUTE_VOLUME",
"def list_files_on_volume(self, volume):\n try:\n self.get_volume(volume)\n except DeploymentError as exc:\n raise exc\n\n res = self._dispatch(['run', '--rm', '-v', '{}:/data'.format(volume), 'busybox', 'ls', '/data'])\n assert len(res.stderr) == 0\n\n return res",
"def test_01_list_volumes(self):\n list_volume_response = Volume.list(\n self.apiclient,\n ids=[self.vm1_root_volume.id, self.vm2_root_volume.id, self.vm3_root_volume.id],\n type='ROOT',\n listAll=True\n )\n self.assertEqual(\n isinstance(list_volume_response, list),\n True,\n \"List Volume response was not a valid list\"\n )\n self.assertEqual(\n len(list_volume_response),\n 3,\n \"ListVolumes response expected 3 Volumes, received %s\" % len(list_volume_response)\n )",
"def volumes(self) -> List:\n if self.node is None:\n return []\n # Removing boot volume from the list\n volume_attachments = []\n for i in self.node[\"volume_attachments\"]:\n volume_detail = self.service.get_volume(i[\"volume\"][\"id\"])\n for vol in volume_detail.get_result()[\"volume_attachments\"]:\n if vol[\"type\"] == \"data\":\n volume_attachments.append(vol)\n return volume_attachments",
"def cli_cosmosdb_mongocluster_list(client,\r\n resource_group_name=None):\r\n\r\n if resource_group_name is None:\r\n return client.list()\r\n\r\n return client.list_by_resource_group(resource_group_name)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the option values for the given volume. | def get_volume_options(mnode, volname, option=None):
if not option:
_, get_vol_options, err = RestClient(mnode).handle_request(
"GET", "/v1/volumes/%s/options" % volname, httplib.OK, None)
else:
_, get_vol_options, err = RestClient(mnode).handle_request(
"GET", "/v1/volumes/%s/options/%s" % (volname, option),
httplib.OK, None)
if not err:
get_vol_options = json.loads(get_vol_options)
return get_vol_options
return None | [
"def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )",
"def volumes(self):\r\n volumes = self.properties[self.VOLUMES]\r\n\r\n return ((vol[self.VOLUME_ID],\r\n vol[self.VOLUME_DEVICE]) for vol in volumes)",
"def test_get_options_interval_movers_volume(self):\n pass",
"def getVolumeLists(infilename='volumelist.csv'):\n\n vialvolumelist = []\n with open(infilename, 'rb') as f:\n reader = csv.DictReader(f)\n for row in reader:\n vialvolumelist.append((row['Vial'], row['Volume']))\n return vialvolumelist",
"def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrometheusSpecVolumesArgs']]]]:\n return pulumi.get(self, \"volumes\")",
"def volumes(self):\n volumes = self.properties[self.VOLUMES]\n\n return ((vol[self.VOLUME_ID],\n vol[self.VOLUME_DEVICE]) for vol in volumes)",
"def list_volumes( fields ):\n global conf\n\n volume_names = VOLUME_NAMES( conf )\n ret = []\n \n for name in volume_names:\n vol_conf = read_volume( name, fields )\n vol_conf['NAME'] = name\n ret.append( vol_conf )\n\n return ret",
"def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list",
"def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)",
"def getValuesFromOption(opt, cmd):\n values = []\n pat = opt + r'\\s*([\\w\\_\\/\\\\]+)'\n m = re.search(pat, cmd)\n while m:\n # found\n val = m.group(1)\n values.append(val)\n # remove the option-value pair\n cmd = re.sub(opt + '\\s*' + val, '', cmd)\n m = re.search(pat, cmd)\n return values",
"def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )",
"def option_values(self, option_definition_container, option_name):\n values = []\n for pfile in self.files:\n (extension, option_values) = lookup_option_values(option_name, pfile, option_definition_container)\n if option_values is not None:\n for option_value in option_values:\n if option_value[0] == 0x0:\n values.append(ord(option_value[1]))\n elif option_value[0] == 0x02:\n offset = 1\n while ord(option_value[1][offset - 1]) > 0x7f:\n offset += 1\n values.append(str(option_value[1][offset:]))\n # Should only be one\n break\n\n # LABEL_REPEATED = 3\n #import ipdb; ipdb.set_trace()\n if (extension is not None) and (extension.label != 3):\n return values[0]\n return values",
"def get_voltage(self):\n result=self.asker('OD')\n if result[0] not in ('N', 'E'):\n header=0\n else:\n header=1\n if result[0]=='E':\n overload=True\n else:\n overload=False\n mode='V'\n if header==1:\n mode=result[3]\n result=result[4:]\n voltage=float(result)\n pt_idx=result.find('.')\n if result[-4:-2]=='-3': \n #V_range={'-33':2, '-34':3, '+02':4, '+03':5, '+04':6}[result[-4:-2]+str(result.find('.'))]\n if pt_idx==3:\n V_range=2 #10 mV\n else:\n V_range=3 #100 mV\n else:\n if pt_idx==2:\n V_range=4 #1 V \n elif pt_idx==3:\n V_range=5 #10 V\n else:\n V_range=6 #30 V\n return dict(voltage=voltage, header=header, overload=overload, mode=mode, V_range=V_range)",
"def volume(self):\n volume = []\n for dv in (self.red, self.green, self.blue, self.alpha):\n if dv.volume.dtype != np.uint8:\n vol = dv.volume.astype(\"float32\", copy=True)\n if dv.vmin is None:\n if vol.min() < 0:\n vol -= vol.min()\n else:\n vol -= dv.vmin\n\n if dv.vmax is None:\n if vol.max() > 1:\n vol /= vol.max()\n else:\n vol /= dv.vmax - dv.vmin\n\n vol = (np.clip(vol, 0, 1) * 255).astype(np.uint8)\n else:\n vol = dv.volume.copy()\n volume.append(vol)\n\n return np.array(volume).transpose([1, 2, 3, 4, 0])",
"def getVolumePrices(region):\n url = \"https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json\"\n req = urllib.request.Request(url)\n req.get_method = lambda: 'GET'\n resp = urllib.request.urlopen(req, context=ignoreCertificate())\n jResp = json.loads(resp.read().decode('utf-8'))\n dvolumes = {}\n for k, v in jResp['products'].items():\n if v['productFamily'] == 'Storage'\\\n and v['attributes']['location'] == aws_region[region]:\n if k in jResp['terms']['OnDemand']:\n price = jResp['terms']['OnDemand'][k][k+\".\"+price_code['ondemand']]['priceDimensions'][k+\".\"+price_code['ondemand']+\".6YS6EN2CT7\"]['pricePerUnit']['USD']\n try:\n vtype = v['attributes']['usagetype'].split(\".\")[1]\n except:\n vtype=\"standard\"\n dvolumes[vtype] = price\n return dvolumes",
"def gluster_volume_options(sdv, sdvkey):\n # type: (dict, str) -> str\n try:\n vo = sdv[sdvkey]['volume_options']\n if util.is_none_or_empty(vo):\n raise KeyError()\n except KeyError:\n vo = None\n return vo",
"def volume_space_list_info(self, volume=None):\n return self.request( \"volume-space-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-space-infos': [ VolSpaceInfo, True ],\n } )",
"def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )",
"def list(self,\n **kwargs\n ):\n\n # dont filter_name=None,\n # dont filter_value=None,\n # dryrun=False):\n\n #:param filter_name (string)\n #:param filter_value (string)\n #:param volume_ids (list): The volume IDs\n\n # filter = \"[[\n # {\n # 'Name': 'xyz',\n # 'Values': [\n # 'abc',\n # ]\n # },\n # ]\"\n\n # filter = eval(filter)\n\n #banner('print kwargs')\n #print(kwargs)\n #print(kwargs['output'])\n\n client = boto3.client('ec2')\n dryrun = kwargs['--dryrun']\n #region = kwargs['--region']\n #vm = kwargs['--vm']# will need vm id from mongo records\n result = client.describe_volumes(\n DryRun=dryrun,\n # Filters=[\n # {\n # 'Name': {},\n # 'Values': [\n # filter_value,\n # ]\n # },\n # ],\n )\n #banner(\"raw results\")\n #print(result)\n #banner(\"raw results end\")\n result = self.update_dict(result)\n\n #print(self.Print(result, kind='volume', output=kwargs['output']))\n\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
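
The `get_volume_options` document above talks to a glusterd2-style REST endpoint through a project-specific `RestClient`. Purely as a non-authoritative illustration of the same request pattern, here is a minimal sketch using plain `requests`; the host, port and volume/option names below are made-up placeholders, not values from the dataset.

import requests

def get_volume_options_sketch(host, volname, option=None, port=24007):
    # Same URL shape as the record above: /v1/volumes/<volname>/options[/<option>]
    path = "/v1/volumes/%s/options" % volname
    if option:
        path += "/%s" % option
    resp = requests.get("http://%s:%s%s" % (host, port, path), timeout=10)
    if resp.status_code == 200:
        return resp.json()  # parsed option dict, like the json.loads() in the record
    return None             # mirror the record's behaviour on error

# Hypothetical calls; "node1" and "testvol" are placeholders.
# all_opts = get_volume_options_sketch("node1", "testvol")
# one_opt = get_volume_options_sketch("node1", "testvol", option="performance.readdir-ahead")
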
Check if object is accessed by attributes or not. | def is_attribute_access(obj, fields):
ag = operator.attrgetter(*fields)
try:
ag(obj)
return True
except AttributeError:
        return False | [
"def _has_private_attribute(self):\n return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])",
"def _check_object_attrs(self, obj: Any) -> bool:\n for a, v in self._named_attrs.items():\n if not hasattr(obj, a):\n return False\n\n if isinstance(v, type(self)):\n if v != getattr(obj, a):\n return False\n\n else:\n v = v if isinstance(v, Value) else Value(v)\n if not v.check_against(getattr(obj, a)):\n return False\n\n for v in self._unnamed_attrs:\n has_attr = False\n for a in dir(obj):\n if getattr(obj, a) == v:\n has_attr = True\n break\n\n if not has_attr:\n return False\n\n for e in self._elements:\n try:\n if e not in obj:\n return False\n except TypeError:\n return False\n\n return True",
"def hasAttribute(*args, **kwargs):\n \n pass",
"def has_attribute(self, atributo):\r\n return atributo in self.__atributos",
"def attribute_check(obj, attribute):\n\n check_node(obj)\n\n dep_node = get_depend_node(obj)\n dep_fn = maya.api.OpenMaya.MFnDependencyNode()\n dep_fn.setObject(dep_node)\n return dep_fn.hasAttribute(attribute)",
"def has_attributes(self):\n return bool(type_has_attributes(self))",
"def check(cls, obj):\n # An object is 'missing' if it has an attribute 'moya_missing' set to True\n return getattr(obj, \"moya_missing\", False)",
"def check_cls_for_attributes(cls, attrs):\n for attr in attrs:\n if not hasattr(cls, attr):\n raise TypeError(\n f'{cls} does not have the attribute \"{attr}\", '\n f'and thus does not expose the interface that is needed '\n f'to check for access.'\n )",
"def is_not_known_attribute(cls, attr):\r\n return attr not in cls.known_attributes",
"def testHasAttributes(self):\n self.assertTrue(hasattr(self.a, 'name'))\n self.assertTrue(hasattr(self.a, 'id'))\n self.assertTrue(hasattr(self.a, 'created_at'))\n self.assertTrue(hasattr(self.a, 'updated_at'))",
"def self(accessing_obj, accessed_obj, *args, **kwargs):\r\n return accessing_obj.typeclass == accessed_obj.typeclass",
"def is_attr_protected(attrname: str) -> bool:\n return (\n attrname[0] == \"_\"\n and attrname != \"_\"\n and not (attrname.startswith(\"__\") and attrname.endswith(\"__\"))\n )",
"def all_set(self):\n for a in self.LEGAL_ATTRS:\n try:\n getattr(self, a)\n except AttributeError:\n return False\n return True",
"def has_attributes(self):\n return bool(cursor_has_attributes(self))",
"def _is_mapping(obj):\n attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')\n return all(hasattr(obj, attr) for attr in attrs)",
"def has_attr(self, user, key):\n return user in self._attributes and key in self._attributes[user]",
"def info_has_read_prop_access(cls, info, model, field_name=None, instance=None):\n\t\trpc_session = info.context.get('rpc_session')\n\t\tif rpc_session is None:\n\t\t\treturn True\n\t\tfield_name = field_name or info.field_name\n\t\treturn model.session_has_read_prop_access(rpc_session, field_name, instance=instance)",
"def test_has_attributes(self):\n self.assertTrue(hasattr(self.ajive, 'blocks'))\n self.assertTrue(hasattr(self.ajive, 'common'))\n self.assertTrue(hasattr(self.ajive.blocks['x'], 'joint'))\n self.assertTrue(hasattr(self.ajive.blocks['x'], 'individual'))\n self.assertTrue(hasattr(self.ajive.blocks['y'], 'joint'))\n self.assertTrue(hasattr(self.ajive.blocks['y'], 'individual'))",
"def __attributeExists(self, Visum, attributeName):\r\n\t\r\n for attr in Visum.Net.Zones.Attributes.GetAll:\r\n\t if str(attr.ID).upper() == attributeName.upper():\r\n\t\treturn True\r\n\treturn False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
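
A short, self-contained usage sketch for the `is_attribute_access` record above; `Point` is a made-up example type, not something from the dataset.

import operator
from collections import namedtuple

def is_attribute_access(obj, fields):
    # Same logic as the record: True only if every field resolves via attribute access.
    ag = operator.attrgetter(*fields)
    try:
        ag(obj)
        return True
    except AttributeError:
        return False

Point = namedtuple("Point", ["x", "y"])
print(is_attribute_access(Point(1, 2), ["x", "y"]))       # True  - namedtuple exposes attributes
print(is_attribute_access({"x": 1, "y": 2}, ["x", "y"]))  # False - dict keys are not attributes
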
Sends wav attachment to Microsoft Bing Speech Recognition Service. | def speechrec(wavobj):
# Decode base64 attachment
outobj = base64.b64decode(wavobj)
# Set connection parameters
authhost = 'api.cognitive.microsoft.com'
authpath = '/sts/v1.0/issueToken'
speechhost = 'speech.platform.bing.com'
speechpath = '/recognize?scenarios=smd&appid=D4D52672-91D7-4C74-8AD8-42B1D98141A5&locale=' + globalconfig('speech_language') + "&format=json&device.os=FreePBX&version=3.0&instanceid=" + str(uuid.uuid4()) + "&requestid=" + str(uuid.uuid4())
authheaders = {'Ocp-Apim-Subscription-Key' : globalconfig('ms_cognitive_api_key'),
'Content-Length' : '0'}
conn = httplib.HTTPSConnection(authhost)
# Get authentication token
conn.request(method="POST", url=authpath, headers=authheaders, body="")
response = conn.getresponse()
token = response.read()
# If we don't get a token then return
if int(response.status) != 200:
return "No transcription available - Auth Error: " + str(token)
# Setup for transcription
headerfields = {"Accept" : "application/json;text/xml",
"Content-Type" : 'audio/wav; codec="audio/pcm"; samplerate=8000; trustsourcerate=false'
}
try:
headerfields["Authorization"] = "Bearer " + token
except:
headerfields["Authorization"] = "Bearer " + token.decode('utf-8')
conn = httplib.HTTPSConnection(speechhost)
# Send wave file for transcription
conn.request(method="POST", url=speechpath, headers=headerfields, body=outobj)
resp = conn.getresponse()
# If there's a problem then return
if int(resp.status) != 200:
return "No transcription available - Server Error: " + resp.read()
respval = json.loads(resp.read())
    return respval['header']['name'] | [
"def stt_google_wav(audio_fname):\n\n print (\"Sending \", audio_fname)\n #Convert to flac first\n with io.open(audio_fname, 'rb') as audio_file:\n content = audio_file.read()\n audio = types.RecognitionAudio(content=content)\n config = types.RecognitionConfig(encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=RATE, language_code=LANG_CODE)\n response = client.recognize(config, audio)\n return response",
"def speechrec(wavobj):\n from google.oauth2 import service_account\n from google.cloud import speech\n from google.cloud.speech import enums\n from google.cloud.speech import types\n import io\n import sys\n import subprocess\n \n # Decode base64 attachment\n outobj = base64.b64decode(wavobj)\n \n #write to temp file\n randid = id_generator()\n audiofilename = globalconfig('temp_dir') + '/' + randid + \".wav\"\n flacaudiofile = globalconfig('temp_dir') + '/' + randid + \".flac\"\n \n try:\n tempwav = open(audiofilename, \"w\")\n tempwav.write(outobj)\n tempwav.close()\n except (Exception) as e:\n sys.exit(\"Error writing to : \" + audiofilename)\n # return \"\"\n \n \n # convert to flac\n retcode = subprocess.call([\"sox\",audiofilename,\"--channels=1\",\"--bits=16\",\"--rate=41000\",flacaudiofile,\"trim\",\"0\",\"55\"])\n\n creds = service_account.Credentials.from_service_account_file(globalconfig('gcloud_service_json'))\n client = speech.SpeechClient(credentials=creds)\n\n with io.open(flacaudiofile, 'rb') as audio_file:\n content = audio_file.read()\n \n audio = types.RecognitionAudio(content=content)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.FLAC,\n sample_rate_hertz=41000,\n language_code='en-US')\n \n response = client.recognize(config, audio)\n\n outstr = \"\"\n transcriptstr = \"\"\n total_confidence = []\n if response:\n for result in response.results:\n for alternative in result.alternatives:\n total_confidence.append(alternative.confidence)\n # outstr += \"Confidence (this is still beta): \" + str(\"{:.2%}\".format(alternative.confidence)) + \"\\n\"\n transcriptstr += alternative.transcript + \" \"\n \n avg_confidence = sum(total_confidence) / len(total_confidence)\n outstr += \"Confidence (this is still beta): \" + str(\"{:.2%}\".format(avg_confidence)) + \"\\n\"\n outstr += transcriptstr\n \n # cleanup files\n dir_name = globalconfig('temp_dir') + '/'\n findFiles = os.listdir(dir_name)\n for item in findFiles:\n if item.endswith(\".wav\") or item.endswith(\".flac\") or item.endswith(\".mp3\"):\n os.remove(os.path.join(dir_name, item))\n \n # os.remove(audiofilename)\n # os.remove(flacaudiofile)\n \n return outstr",
"def get_wav_data():\n # Get requested word and query db for audio url\n word = request.args.get(\"word\")\n audio_url = services.query.get_audio_url(word)\n\n # Fetch audio file from MW API\n # and send to client\n data = requests.get(audio_url).content\n return send_file(BytesIO(data), mimetype='audio/wav')",
"def test_audio_convert_to_wav(self):\n pass",
"def speechrec_openaiwhisper(wavobj):\n import openai\n openai.api_key = OPENAI_API_KEY\n\n \n import io\n import sys\n import subprocess\n \n # Decode base64 attachment\n outobj = base64.b64decode(wavobj)\n \n #write to temp file\n randid = id_generator()\n audiofilename = globalconfig('temp_dir') + '/' + randid + \".wav\"\n mp3audiofile = globalconfig('temp_dir') + '/' + randid + \".mp3\"\n \n # tempwav = open(audiofilename, \"w\")\n # tempwav.write(outobj)\n # tempwav.close()\n \n try:\n tempwav = open(audiofilename, \"wb\")\n tempwav.write(outobj)\n tempwav.close()\n except (Exception) as e:\n sys.exit(\"Error writing to : \" + audiofilename)\n # return \"\"\n \n \n # convert to mp3\n retcode = subprocess.call([\"lame\",\"-b\",\"32\",\"--resample\",\"8\",\"-a\",audiofilename,mp3audiofile])\n\n audio_file = open(mp3audiofile, \"rb\")\n\n transcript = openai.Audio.translate(\"whisper-1\", audio_file)\n\n\n outstr = \"\"\n transcriptstr = transcript.text\n \n # outstr += \"Confidence (this is still beta): \" + str(\"{:.2%}\".format(avg_confidence)) + \"\\n\"\n outstr += transcriptstr\n \n # cleanup files\n dir_name = globalconfig('temp_dir') + '/'\n findFiles = os.listdir(dir_name)\n for item in findFiles:\n if item.endswith(\".wav\") or item.endswith(\".flac\") or item.endswith(\".mp3\"):\n os.remove(os.path.join(dir_name, item))\n \n # os.remove(audiofilename)\n # os.remove(flacaudiofile)\n \n return outstr",
"def listen_for_speech(threshold=THRESHOLD):\n #Open stream\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n print \"* Listening mic. \"\n audio2send = []\n cur_data = '' # current chunk of audio data\n rel = RATE/CHUNK\n slid_win = deque(maxlen=SILENCE_LIMIT * rel)\n #Prepend audio from 0.5 seconds before noise was detected\n prev_audio = deque(maxlen=PREV_AUDIO * rel)\n started = False\n response = []\n\n while True:\n cur_data = stream.read(CHUNK)\n slid_win.append(math.sqrt(abs(audioop.avg(cur_data, 4))))\n #print slid_win[-1]\n if(sum([x > THRESHOLD for x in slid_win]) > 0):\n if(not started):\n print \"Starting record of phrase\"\n started = True\n audio2send.append(cur_data)\n elif (started is True):\n print \"Finished\"\n # The limit was reached, finish capture and deliver.\n filename = save_speech(list(prev_audio) + audio2send, p)\n # Send file to Google and get response\n r = stt_google_wav(filename)\n\n # Reset all\n started = False\n slid_win = deque(maxlen=SILENCE_LIMIT * rel)\n prev_audio = deque(maxlen=0.5 * rel)\n audio2send = []\n print \"Listening ...\"\n else:\n prev_audio.append(cur_data)\n\n print \"* Done recording\"\n stream.close()\n p.terminate()\n\n return response",
"def send_wave_data(dev):\n f = open(\"wave1.bin\", \"rb\") # wave1.bin is the waveform to be sent\n data = f.read()\n print(\"data: \", data[0:10])\n print('write bytes:', len(data))\n dev.write_binary_values('C1:WVDT M50,WVNM,wave1,TYPE,5,LENGTH,32KB,FREQ,0.1,AMPL,5.0,OFST,0.0,PHASE,0.0,WAVEDATA,', data, datatype='B', header_fmt='empty') # SDG00100 series\n dev.write(\"C1:ARWV NAME,wave1\")\n f.close()",
"def record_voice():\n fs = 44100 # Sample rate\n seconds = 3 # Duration of recording\n # sd.default.device = \"Built-in Audio\" # Speakers full name here\n\n print(\"Say something:\")\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait() # Wait until recording is finished\n write(\"speech_emotion_recognition/recordings/myvoice.wav\", fs, myrecording)\n print(\"Voice recording saved.\")",
"def wiimote_Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)",
"def convert_wav(\n self,\n wav_bytes: bytes,\n sample_rate: typing.Optional[int] = None,\n sample_width: typing.Optional[int] = None,\n channels: typing.Optional[int] = None,\n ) -> bytes:\n if sample_rate is None:\n sample_rate = self.sample_rate\n\n if sample_width is None:\n sample_width = self.sample_width\n\n if channels is None:\n channels = self.channels\n\n return subprocess.run(\n [\n \"sox\",\n \"-t\",\n \"wav\",\n \"-\",\n \"-r\",\n str(sample_rate),\n \"-e\",\n \"signed-integer\",\n \"-b\",\n str(sample_width * 8),\n \"-c\",\n str(channels),\n \"-t\",\n \"raw\",\n \"-\",\n ],\n check=True,\n stdout=subprocess.PIPE,\n input=wav_bytes,\n ).stdout",
"def run(self):\n play_obj = self.wav.play()\n play_obj.wait_done()",
"def save_speech(self, data):\n\n\t\tfilename = 'audio'\n\t\t# writes data to WAV file\n\t\tdata = ''.join(data)\n\t\twf = wave.open(filename + '.wav', 'wb')\n\t\twf.setnchannels(self.CHANNELS)\n\t\twf.setsampwidth(self.WIDTH)\n\t\twf.setframerate(self.RATE) # TODO make this value a function parameter?\n\t\twf.writeframes(data)\n\t\twf.close()\n\t\treturn filename + '.wav'",
"def Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)",
"def listen_for_speech(self , num_phrases = -1):\n\n\t\t#Open stream\n\t\tself.s = connection.Connection()\n\t\t\n\t\tsilence_thread = Thread(target=self.silence_analysys)\n\t\tsilence_thread.start()\n\n\t\tp = pyaudio.PyAudio()\n\n\t\tstream = p.open(format = self.FORMAT,\n\t\t\t\t\t\tchannels = self.CHANNELS,\n\t\t\t\t\t\trate = self.RATE,\n\t\t\t\t\t\tinput = True,\n\t\t\t\t\t\tframes_per_buffer = self.CHUNK,\n\t\t\t\t\t\tstream_callback = self.callback)\n\t\ttry:\n\n\t\t\tstream.start_stream()\n\n\t\t\twhile stream.is_active():\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\n\t\texcept KeyboardInterrupt:\n\t\t\tself.log.debug(\"INTERRUPTED BY USER: Finished\")\n\t\texcept Exception as e:\n\t\t\tself.log.debug('ERROR: ' + str(e))\n\t\tfinally:\n\t\t\tself.audioQueue.put(None)\n\t\t\tself.s.destroy()\n\t\t\tself.log.info(\"* Done recording\")\n\n\t\t\tstream.stop_stream()\n\t\t\tstream.close()\n\t\t\tp.terminate()",
"def handle_audio(self, chat_id, file_id):\n\n self.bot.getFile(file_id)\n cur_dir = os.curdir\n for format in self.formats:\n path = os.path.join(cur_dir, \"audio\", str(chat_id) + \".\" + format)\n self.bot.download_file(file_id, path)\n\n self.bot.sendMessage(chat_id, \"Ok. Now send me extension into which you want to convert this audio.\")",
"def convert_audio(self, chat_id, extension):\n\n path = \"audio/\" + str(chat_id) + \".\" + extension\n self.bot.sendAudio(chat_id, open(path, \"rb\"))",
"def upload_wave_file(self, file_name):\n\n files = {'file': open(file_name, 'rb')}\n requests.post(self.address + '/api/waves', files=files)",
"def transcribe_wav(in_fname):\n tmp_fname1 = get_unique_fname('../tmp/extended', '.wav')\n tmp_fname2 = get_unique_fname('../tmp/transcribe', '.log')\n\n # prepend some silence (first bit of speech might else be treated as noise)\n subprocess.check_call(['praat', '--run', '../misc/prepend_silence.praat',\n in_fname, tmp_fname1])\n\n # run pocketsphinx (printing to log so only transcript is written to stdout)\n comp_proc = subprocess.run(\n ['pocketsphinx_continuous',\n '-infile', tmp_fname1, '-logfn', tmp_fname2],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n\n remove(tmp_fname1)\n remove(tmp_fname2)\n\n return comp_proc.stdout.decode(\"utf-8\").replace('\\n', '').replace('\\r', '')",
"def audio_file(self, path):\n logger.debug(\"serving file: %s\" % path)\n return send_from_directory(self.AUDIO_DIR_NAME, path)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
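
The `speechrec` record above drives the (now retired) Bing Speech endpoints with `httplib`. As a hedged sketch of the same two-step flow - exchange the subscription key for a bearer token, then POST the raw WAV body - here is an equivalent written with `requests`; the endpoints are copied verbatim from the record, while the API key and language are placeholders.

import uuid
import requests

def bing_transcribe_sketch(wav_bytes, api_key, language="en-US"):
    # Step 1: trade the subscription key for a short-lived bearer token.
    token_resp = requests.post(
        "https://api.cognitive.microsoft.com/sts/v1.0/issueToken",
        headers={"Ocp-Apim-Subscription-Key": api_key},
    )
    token_resp.raise_for_status()
    # Step 2: POST the WAV bytes to the recognize endpoint used in the record.
    url = (
        "https://speech.platform.bing.com/recognize"
        "?scenarios=smd&appid=D4D52672-91D7-4C74-8AD8-42B1D98141A5"
        "&locale=%s&format=json&device.os=FreePBX&version=3.0"
        "&instanceid=%s&requestid=%s" % (language, uuid.uuid4(), uuid.uuid4())
    )
    resp = requests.post(
        url,
        headers={
            "Authorization": "Bearer " + token_resp.text,
            "Content-Type": 'audio/wav; codec="audio/pcm"; samplerate=8000; trustsourcerate=false',
        },
        data=wav_bytes,
    )
    resp.raise_for_status()
    return resp.json()["header"]["name"]
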
Checks to see if HTML tags exist in the message and changes content type | def examinehtml(textobject, contenttype):
if not globalconfig('check_html'):
return contenttype
if (('<html>' in textobject) and ('</html>' in textobject)):
return 'text/html'
else:
        return 'text/plain' | [
"def is_html_message(self):\n return self._is_html_message",
"def is_html(text):\n if text is not None and '<html' in text[:300].lower():\n return True\n return False",
"def is_html_like(text):\n if isinstance(text, str):\n text = text.strip()\n if text.startswith(\"<\"):\n return True\n return False\n return False",
"def isHTML(content):\n soup = BeautifulSoup(content, 'html.parser')\n return bool(soup.find())",
"def is_html(self):\n return self._is_html",
"def test_is_html_tag_properly(self):\r\n file=\"HTMLDOC.txt\"\r\n html_doc=p.read_file(file)\r\n result=p.is_html_tag_properly(html_doc)\r\n self.assertTrue(result,True)",
"def test_base_content_type_translates_markdown_to_html(self):\n\n a_post = Post(markdown=\"*test*\")\n self.assertEqual(a_post.body(), \"<p><em>test</em></p>\\n\")",
"def and_has_html(self, html: str):\n pass",
"def is_html_message(self, is_html_message):\n allowed_values = [\"Y\", \"N\"]\n if is_html_message not in allowed_values:\n raise ValueError(\n \"Invalid value for `is_html_message`, must be one of {0}\"\n .format(allowed_values)\n )\n self._is_html_message = is_html_message",
"def _good_response(self, resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def is_body_html(self) -> bool:\n return self._is_body_html",
"def inHTML(text, index, body):\n # if there is a < then lxml will interpret that as a tag, so only search for the stuff before it\n text = text.split(b\"<\")[0]\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" not in path\n except IndexError:\n return False",
"def set_content(self, html, text):\n # For the curious, some email clients are too daft to pay attention to\n # content-type, and instead display the last alternative they\n # understand. I.e. if 'Content-Type: text/plain' is the last MIME added\n # to a 'Content-Type: multipart/related', those shitty apps (gmail is\n # one...) will display the FUCKING PLAIN TEXT.\n # NOTE :: A metric fuck-ton of time was waisted figuring the above out.\n # NOTE :: Learn from my pain. <brian@poold.in>\n # NOTE :: http://www.violato.net/blog/others/110-gmail-email-formatting-issue-with-multipartalternative-mime-entry\n if not text:\n from pooldlib.exceptions import GoWorkForBalmerError\n msg = \"You were warned!\"\n raise GoWorkForBalmerError(msg)\n text = MIMEText(text.encode('utf8'), _subtype='plain', _charset='utf-8')\n self.msg_alternative.attach(text)\n\n html = MIMEText(html.encode('utf8'), _subtype='html', _charset='utf-8')\n self.msg_alternative.attach(html)",
"def is_html(self):\r\n return self.base_class is HTMLTranslator",
"def _parse_html_error(content):\n\n msg = None\n if not content:\n return msg\n\n from xml.dom.minidom import Document, parse\n dom = parse(cStringIO.StringIO(content))\n msg = \"\"\n\n paragraphs = []\n if not isinstance(dom, Document):\n # Assume the output was the message.\n msg = content\n else:\n paragraphs = dom.getElementsByTagName(\"p\")\n\n # XXX this is specific to the depot server's current\n # error output style.\n for p in paragraphs:\n for c in p.childNodes:\n if c.nodeType == c.TEXT_NODE:\n value = c.nodeValue\n if value is not None:\n msg += (\"\\n{0}\".format(value))\n\n return msg",
"def ensure_correct_content_type(self, brain):\n\n if self.has_expected_content_type(brain):\n return\n\n mail = brain.getObject()\n message = getattr(mail, 'message', None)\n if not message:\n return\n\n # can't be paranoid enough sometimes ...\n if not INamedBlobFile.providedBy(message):\n return\n\n if not getattr(message, 'filename', None):\n return\n\n content_type = get_contenttype(filename=message.filename)\n if not content_type:\n return\n\n message.contentType = content_type\n # we're actually interested in updating metadata only, but since we\n # can't just chose a cheap index to achieve that.\n mail.reindexObject(idxs=['id'])",
"def _html_or_none(self, request, template, context={}):\r\n if (http.accepts_html(request.META.get('HTTP_ACCEPT'))):\r\n return render_to_string(template, context)\r\n return None",
"def messageInHTML(aMessageTitle, aMessage):\n return \"\"\"<html>\n <head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\">\n <title>WebFilter</title>\n </head>\n <body> <h2>%s</h2><br>%s</body>\n </html>\n \"\"\" %(aMessageTitle, aMessage)",
"def HTMLChecker(match_obj):\n \n if AllowedHTML.search(match_obj.group()):\n return match_obj.group()\n else:\n return \"\"",
"def detect_message_text_formatting(message: Message) -> Optional[str]:\n\n raw_text: str = message.text\n\n before_escape_md = raw_text.count('\\\\')\n before_escape_html = raw_text.count('&')\n\n escaped_md = escape_md(raw_text).count('\\\\') - before_escape_md\n escaped_html = quote_html(raw_text).count('&') - before_escape_html\n\n with dont_change_plain_urls, dont_escape_md:\n with_entities = message.md_text\n\n escaped_with_entities = escape_md(with_entities).count('\\\\') - before_escape_md\n\n if escaped_with_entities > max(escaped_html, escaped_md):\n parse_mode = None\n elif escaped_html > escaped_md:\n parse_mode = 'html'\n else:\n parse_mode = 'markdown'\n\n return parse_mode"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
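
A tiny usage sketch of the branch logic in the `examinehtml` record above; the `globalconfig('check_html')` gate is replaced by a plain boolean argument so the example runs standalone.

def examine_html_sketch(text, content_type, check_html=True):
    # Same rule as the record: only switch the type when both tags are present.
    if not check_html:
        return content_type
    if "<html>" in text and "</html>" in text:
        return "text/html"
    return "text/plain"

print(examine_html_sketch("<html><body>hi</body></html>", "text/plain"))  # text/html
print(examine_html_sketch("plain message body", "text/plain"))            # text/plain
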
Converts wav attachments to MP3. Requires FFMPEG | def vmtomp3(filename, wavobj):
outobj = base64.b64decode(wavobj)
if not globalconfig('vm_to_mp3'):
return [filename, 'audio/x-wav', outobj]
randfilename = str(random.randint(1,10000000000)) + filename
newrandfilename = randfilename[:-3] + "mp3"
try:
tempwav = open(globalconfig('temp_dir') + '/' + randfilename, "w")
tempwav.write(outobj)
tempwav.close()
except (Exception) as e:
return [filename, 'audio/x-wav', outobj]
ffcommand = [globalconfig('ffmpeg_location'), '-loglevel', 'error', '-i', globalconfig('temp_dir') + '/' + randfilename, '-ab', '16k', globalconfig('temp_dir') + '/' + newrandfilename]
child = Popen(ffcommand, stdout=PIPE, stderr=PIPE)
stdout, stderr = child.communicate()
rc = child.returncode
if rc == 0:
mp3handle = open(globalconfig('temp_dir') + '/' + newrandfilename, 'r')
mp3dump = mp3handle.read()
try:
os.remove(globalconfig('temp_dir') + '/' + randfilename)
except:
pass
try:
os.remove(globalconfig('temp_dir') + '/' + newrandfilename)
except:
pass
return [filename[:-3] + 'mp3', 'audio/x-mpeg-3', mp3dump]
else:
        return [filename, 'audio/x-wav', outobj] | [
"def wav2mp3(wavfile, mp3file, bitrate=128):\n cmd = \"sox -c 1 %s -C %d %s\" % (wavfile, bitrate, mp3file)\n subprocess.call(cmd.split(\" \"))",
"def convert_mp3(self, filename, to_mp3=True):\r\n fs = FluidSynth()\r\n title = filename.split('.')[0]\r\n audio_filename = f'{title}.mp3' if to_mp3 else f'{title}.wav'\r\n # saves file to disk\r\n fs.midi_to_audio(filename, audio_filename)",
"def test_audio_convert_to_mp3(self):\n pass",
"def test_mp3_to_wav(src_dest):\n mp3_to_wav(src_dest[0], src_dest[1])\n\n # the following call with raise an exception\n # if the file being read is not encoded as wav\n pydub.AudioSegment.from_wav(src_dest[1])",
"def mp3_to_wav(self, pathname):\n if self.folder_method == 'folder':\n label_list = open(self.out_folder + '/' + 'labels.txt', 'w')\n my_path_cwd = self.ext_storage\n labels = os.listdir(my_path_cwd)\n pbar = tqdm(range(self.total_conv))\n for upl in pbar:\n rand_lab = random.choice(labels)\n path = my_path_cwd + '/' + rand_lab\n file = random.choice(os.listdir(path))\n filetype = file[-4:]\n filename = file[:-4]\n pbar.set_description(filename)\n if filetype == '.mp3':\n mp3_form = AudioSegment.from_mp3(path + '/' + file)\n full_clip = self.seconds * 1000 # Runs in miliseconds\n if full_clip > len(mp3_form):\n continue\n rand_start = np.random.randint(0, len(mp3_form) - full_clip)\n mp3_wav = mp3_form[rand_start:(rand_start + full_clip)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n mp3_wav.export(destin, format='wav')\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n elif filetype == '.m4a':\n m4a_form = AudioSegment.from_file(path + '/' + file)\n full_clip = self.seconds * 1000 # Runs in miliseconds\n if full_clip > len(m4a_form):\n continue\n rand_start = np.random.randint(0, len(m4a_form) - full_clip)\n m4a_wav = m4a_form[rand_start:(rand_start + full_clip)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n m4a_wav.export(destin, format='wav')\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n elif filetype == '.wav':\n wav_form = AudioSegment.from_wav(path + '/' + file)\n full_clip = self.seconds * 1000 # Runs in miliseconds\n if full_clip > len(wav_form):\n continue\n rand_start = np.random.randint(0, len(wav_form) - full_clip)\n wav_wav = wav_form[rand_start:(rand_start + full_clip)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n wav_wav.export(destin, format='wav')\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n else:\n if file[-5:] == '.flac':\n filetype = file[-5:]\n filename = file[:-5]\n data, samprate = sf.read(path + '/' + file)\n # set value due to different sample sizes\n set_value = self.seconds * samprate\n if set_value > len(data):\n continue\n rand_start = np.random.randint(0, len(data) - set_value)\n new_data = data[rand_start:int(rand_start + set_value)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n sf.write(destin, new_data, samprate)\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n return",
"def strip_audio(targetAud,pathVid):\n import os\n import moviepy.editor as mp\n if os.path.isdir(targetAud) is False:\n print(\"file doesn`t exist so creating export file\")\n os.mkdir(targetAud)\n else:\n pass\n direts = os.listdir(f\"{pathVid}\")\n for tst in direts:\n video_file = f\"{pathVid}/{tst}\"\n print(video_file)\n\n video = mp.VideoFileClip(video_file)\n audio = video.audio\n\n #change format to ffmpeg of your choice\n\n audio.write_audiofile(f\"{targetAud}/{tst[:-4]}.mp3\")",
"def wav_to_mp3_batch(dir_in,\n dir_out=\"../audio/mp3_chunked\",\n bitrate=96\n ):\n\n existing = set()\n bitrate = str(bitrate)\n \n for mp3_fpath in glob(dir_out + \"/*.mp3\"):\n f_id = os.path.splitext(os.path.basename(mp3_fpath))[0]\n existing.add(f_id)\n \n for wav_fpath in glob(dir_in + \"/*.wav\"):\n f_id = os.path.splitext(os.path.basename(wav_fpath))[0]\n if f_id not in existing:\n command = \"lame -b{} {}/{}.wav {}/{}.mp3\".format(bitrate, \n dir_in, \n f_id, \n dir_out, \n f_id)\n result = os.system(command) \n if result != 0:\n print(\"*** ERROR: {} not converted\".format(fb_id))",
"def _convert_midi_to_mp3(self, midi_fp):\n\n if not os.path.exists(MP3_DIR):\n os.mkdir(MP3_DIR)\n\n mp3_fp = os.path.join(MP3_DIR, f'{uuid1().hex}.mp3')\n\n command = f\"timidity {midi_fp} -Ow -o - | ffmpeg -y -f wav -i - {mp3_fp}\"\n print(f\"Running command: {command}\")\n\n subprocess.call(command, shell=True)\n\n print(f\"Done converting file\")\n\n return mp3_fp",
"def handle_audio(self, chat_id, file_id):\n\n self.bot.getFile(file_id)\n cur_dir = os.curdir\n for format in self.formats:\n path = os.path.join(cur_dir, \"audio\", str(chat_id) + \".\" + format)\n self.bot.download_file(file_id, path)\n\n self.bot.sendMessage(chat_id, \"Ok. Now send me extension into which you want to convert this audio.\")",
"def test_audio_convert_to_wav(self):\n pass",
"def extract_audio(input_vid, audio_params):\n cmd = f'{FFMPEG} -i {join(os.getcwd(),input_vid)} -vn {audio_params} {join(os.getcwd(),\".temp\",\"audio.mkv\")}'\n Popen(cmd, shell=True).wait()",
"def _encode_mp3(self, wav_filename):\n encode_mp3(\n wav_filename, self.mp3_filename, self.track_metadata,\n stdout_filename=self.stdout_filename)\n\n # check for clipping\n stdout = self.__read_stdout()\n if \"WARNING: clipping occurs at the current gain.\" in stdout:\n clipping_occurs = True\n m = re.search(\n r\"encode\\s+again\\s+using\\s+\\-\\-scale\\s+(\\d+\\.\\d+)\", stdout)\n scale = float(m.group(1)) if m else 0.99\n\n # re-encode, scaling the PCM data, until there is no clipping\n while clipping_occurs:\n self.__log.info(\n \"detected clipping in %s; re-encoding at %.2f scale...\",\n self.mp3_filename, scale)\n status = (\n self.track_index, self.cdda_filename, self.flac_filename,\n self.stdout_filename, TRACK_REENCODING_MP3(scale))\n _ENCODING_QUEUE.put((5, status))\n\n encode_mp3(\n wav_filename, self.mp3_filename, self.track_metadata,\n scale=scale, stdout_filename=self.stdout_filename)\n\n clipping_occurs = (\n \"WARNING: clipping occurs at the current gain.\"\n in self.__read_stdout())\n scale -= 0.01",
"def convert_to_wav(csv_file, target_dir):\n wav_dir = os.path.join(target_dir, 'wav/')\n txt_dir = os.path.join(target_dir, 'txt/')\n os.makedirs(wav_dir, exist_ok=True)\n os.makedirs(txt_dir, exist_ok=True)\n path_to_data = os.path.dirname(csv_file)\n\n def process(x):\n global media_path\n file_path, text = x\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n act_path_to_data = os.path.join(path_to_data, media_path)\n\n text = text.strip().upper()\n with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:\n f.write(text)\n cmd = \"sox {} -r {} -b 16 -c 1 {}\".format(\n os.path.join(act_path_to_data, file_path),\n args.sample_rate,\n os.path.join(wav_dir, file_name + '.wav'))\n subprocess.call([cmd], shell=True)\n\n print('Converting mp3 to wav for {}.'.format(csv_file))\n _, fext = os.path.splitext(csv_file)\n with open(csv_file) as csvfile:\n reader = None\n if fext.endswith('tsv'):\n reader = csv.DictReader(csvfile, delimiter='\\t')\n else:\n reader = csv.DictReader(csvfile)\n #i = reader.fieldnames\n #print(i)\n data = [(row['path'], row['sentence']) for row in reader]\n with ThreadPool(10) as pool:\n pool.map(process, data)",
"def video2audio(path, wav_output):\n my_video = mp.VideoFileClip(path)\n my_video.audio.write_audiofile(wav_output) # output mp4 to wav file\n return my_video",
"def simple_transformer(mp3path, savedirectory='./data/interim/features/',\n filename='output',\n transforms=['stft', 'wave', 'logmel', 'mfcc', 'chroma',\n 'cqt'],\n sample_rate=32000, seconds=30, offset=0.0):\n\n if isinstance(transforms, str): transforms = [transforms]\n\n # load librosa file\n waveform, _ = librosa.load(mp3path, sr=sample_rate, duration=seconds,\n offset=offset)\n\n # add transforms here\n for output in transforms:\n if output == \"wave\":\n dir_path = os.path.join(savedirectory, output)\n if not os.path.exists(dir_path): os.makedirs(dir_path)\n\n wave = torch.Tensor(waveform)\n output_path = os.path.join(dir_path, f'{filename}.pt')\n torch.save(wave, output_path)\n\n elif output == \"stft\":\n dir_path = os.path.join(savedirectory, output)\n if not os.path.exists(dir_path): os.makedirs(dir_path)\n\n spec = librosa.stft(waveform)\n spec_db = librosa.amplitude_to_db(abs(spec))\n spec_db = torch.Tensor(spec_db)\n output_path = os.path.join(dir_path, f'{filename}.pt')\n torch.save(spec_db, output_path)\n\n elif output == \"logmel\":\n dir_path = os.path.join(savedirectory, output)\n if not os.path.exists(dir_path): os.makedirs(dir_path)\n\n mel = librosa.feature.melspectrogram(y=waveform, sr=sample_rate)\n mel = mel.astype(np.float16)\n logmel = np.log(10000 * mel + 1)\n logmel_db = librosa.amplitude_to_db(abs(logmel))\n logmel_db = torch.Tensor(logmel_db)\n output_path = os.path.join(dir_path, f'{filename}.pt')\n torch.save(logmel_db, output_path)\n\n elif output == \"chroma\":\n dir_path = os.path.join(savedirectory, output)\n if not os.path.exists(dir_path): os.makedirs(dir_path)\n\n harmonic,_ = librosa.effects.hpss(waveform)\n chroma = librosa.feature.chroma_cqt(y=harmonic, sr=sample_rate,\n bins_per_octave=36) # chroma_stft???\n form = torch.Tensor(chroma)\n output_path = os.path.join(dir_path, f'{filename}.pt')\n torch.save(form, output_path)\n\n elif output == \"mfcc\":\n dir_path = os.path.join(savedirectory, output)\n if not os.path.exists(dir_path): os.makedirs(dir_path)\n\n mfccs = librosa.feature.mfcc(waveform, sr=sample_rate)\n mfccs = sklearn.preprocessing.scale(mfccs, axis=1)\n mfcc_tensor = torch.Tensor(mfccs)\n\n output_path = os.path.join(dir_path, f'{filename}.pt')\n torch.save(mfcc_tensor, output_path)\n\n elif output == \"cqt\":\n dir_path = os.path.join(savedirectory, output)\n if not os.path.exists(dir_path): os.makedirs(dir_path)\n\n c = librosa.cqt(y=waveform, sr=sample_rate, bins_per_octave=36)\n c_db = librosa.amplitude_to_db(abs(c))\n c_db = torch.Tensor(c_db)\n output_path = os.path.join(dir_path, f'{filename}.pt')\n torch.save(c_db, output_path)\n\n else:\n raise ValueError(\"Enter a valid transform\")\n\n return True",
"def transcode_to_mp3(filepath, quality='320k', slice_start=None, slice_duration=None):\r\n\r\n err_output = None\r\n cmd_path = spawn.find_executable('ffmpeg')\r\n if cmd_path is None:\r\n cmd_path = spawn.find_executable('avconv')\r\n if cmd_path is None:\r\n raise IOError('Neither ffmpeg nor avconv was found in your PATH')\r\n cmd = [cmd_path, '-i', filepath]\r\n\r\n if slice_duration is not None:\r\n cmd.extend(['-t', str(slice_duration)])\r\n if slice_start is not None:\r\n cmd.extend(['-ss', str(slice_start)])\r\n\r\n if isinstance(quality, int):\r\n cmd.extend(['-q:a', str(quality)])\r\n elif isinstance(quality, basestring):\r\n cmd.extend(['-b:a', quality])\r\n else:\r\n raise ValueError(\"quality must be int or string, but received %r\" % quality)\r\n\r\n cmd.extend(['-f', 's16le', # don't output id3 headers\r\n '-c', 'libmp3lame',\r\n 'pipe:1'])\r\n\r\n log.debug('running transcode command %r', cmd)\r\n\r\n try:\r\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE)\r\n\r\n audio_out, err_output = proc.communicate()\r\n\r\n if proc.returncode != 0:\r\n err_output = (\"(return code: %r)\\n\" % proc.returncode) + err_output\r\n raise IOError # handle errors in except\r\n\r\n except (OSError, IOError) as e:\r\n\r\n err_msg = \"transcoding command (%s) failed: %s. \" % (' '.join(cmd), e)\r\n\r\n if 'No such file or directory' in str(e):\r\n err_msg += '\\navconv must be installed and in the system path.'\r\n\r\n if err_output is not None:\r\n err_msg += \"\\nstderr: '%s'\" % err_output\r\n\r\n log.exception('transcoding failure:\\n%s', err_msg)\r\n\r\n raise IOError(err_msg)\r\n\r\n else:\r\n return audio_out",
"def mixAuVid(pathAud,pathVid,pathTarget):\n import os\n import moviepy.editor as mp\n if os.path.isdir(pathTarget) is False:\n print(\"target file doesn`t exist so creating export file\")\n os.mkdir(pathTarget)\n else:\n pass\n diretvid = os.listdir(pathVid)\n diretsound = os.listdir(pathAud)\n for video in diretvid:\n clip = mp.VideoFileClip(f\"{pathVid}/{video[:-4]}.mp4\")\n print(video)\n audioclip = mp.AudioFileClip(f\"{pathAud}/{video[:-4]}.mp3\")\n finalclip = clip.set_audio(audioclip)\n finalclip.write_videofile(f\"{pathTarget}/{video[:-4]}.mp4\")",
"def convert_video_audio(title, video_filename):\n audio_filename = 'audios/{title}.mp3'.format(title=title)\n call([\n 'ffmpeg',\n '-i',\n video_filename,\n '-b:a',\n '192k',\n '-vn',\n audio_filename\n ])\n return audio_filename",
"def audio(self):\n audio_path = ffmpeg_extract(input_path=self.path(), output_ext='.wav')\n return Audio(audio_path)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
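
A condensed, non-authoritative sketch of the conversion step in the `vmtomp3` record above, using `subprocess.run`, a temporary directory instead of the record's random file names, and binary file modes (the record opens its files in text mode); the ffmpeg flags and MIME types are taken from the record, everything else is a placeholder.

import base64
import os
import subprocess
import tempfile

def wav_attachment_to_mp3_sketch(filename, wav_b64, ffmpeg="ffmpeg"):
    wav_bytes = base64.b64decode(wav_b64)
    workdir = tempfile.mkdtemp()
    wav_path = os.path.join(workdir, filename)
    mp3_path = wav_path[:-3] + "mp3"   # assumes the name ends in ".wav", as the record does
    with open(wav_path, "wb") as f:
        f.write(wav_bytes)
    proc = subprocess.run(
        [ffmpeg, "-loglevel", "error", "-i", wav_path, "-ab", "16k", mp3_path]
    )
    if proc.returncode != 0:
        return filename, "audio/x-wav", wav_bytes          # fall back to the original WAV
    with open(mp3_path, "rb") as f:
        return filename[:-3] + "mp3", "audio/x-mpeg-3", f.read()
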
Get number from string of format {currency}{value}{multiplicator} | def parse_param_string(parameter_string: str) -> Number:
parameter_string = parameter_string.replace('\xa0', ' ')
match = re.search(
r'(?:US\$)' # currency
r'(?P<value>[+−-]?\d+(?:\.\d+)?)' # value, minus sign can be written as − or -
r'(?P<multiplicator>\s[a-zA-Z]*)?', # billion or million
parameter_string)
if not match:
raise ValueError('Parameter is not money amount')
value, multiplicator = match.groups(default='')
value = value.replace('−', '-') # minus sign can be written as − or -
multiplicator = multiplicator[1:] # eliminating required space char
if multiplicator not in multiplicators:
raise ValueError('Bad multiplicator')
    return float(value) * multiplicators[multiplicator] | [
"def string_to_number(self,s):\n str_number = self.regex_decimal.search(s).group(0)\n str_number = str_number.replace(',', '.')\n nb = float(str_number)\n regex_mapper = re.compile('|'.join(['(' + k + ')' for k in self.mapping_number.keys()]))\n match = regex_mapper.search(s)\n if match is not None:\n return self.mapping_number[match.group(0)] * nb\n else:\n return nb",
"def parse_money(string):\n return float(string[1:])",
"def get_number_and_unit(value_string):\n if value_string.endswith('%'):\n return (float(value_string.strip()[:-1]), '%')\n try:\n a = ureg.parse_expression(value_string)\n except:\n return (value_string, None)\n\n if type(a) == list:\n return (value_string, None)\n\n if isinstance(a, (int, float)):\n return (a, None)\n else:\n # a.ito_base_units()\n value = a.magnitude\n unit = \"{:~}\".format(a)\n unit = unit[unit.index(\" \"):].replace(\" \", \"\")\n\n if unit == 'min':\n unit = 's'\n value *= 60\n return (value, unit)",
"def asNumeral(value):",
"def redshift_str2num(z: str):\n z = z.strip('z').replace('p', '.')\n return round(float(z), 3)",
"def base_to_dec(string, base, digits):\n if string == '':\n return 0\n return digits.index(string[0]) + base_to_dec(string[1:], base, digits) * base",
"def get_number(data_string):\n if len(data_string) == 0:\n return 0\n elif \"(\" in data_string:\n n, *_ = data_string.split(\"(\")\n number = re.sub(r\"\\D\", \"\", n)\n return int(number) if len(number) > 0 else 0\n else:\n number = re.sub(r\"\\D\", \"\", data_string)\n return int(number) if len(number) > 0 else 0",
"def decode_number(num):\n r = re.match('^([0-9.]+)[ ]+([kM]?)(Wh|W)?$', num)\n if r:\n factor = UNITPREFIX.get(r.group(2), 1)\n return float(r.group(1))*factor\n else:\n raise Exception('Cannot parse number [%s]' % num)",
"def convert_to_base(string):\n stripped = string.strip()\n try:\n return int(stripped)\n except ValueError:\n if stripped[-1] in ('K', 'M', 'G', 'T'):\n return int(int(stripped[:-1]) * conv_factor[stripped[-1]])\n else:\n ValueError(f\"Invalid unit {stripped[-1]}\")",
"def _to_number(cls, string):\n num = ast.literal_eval(string)\n if isinstance(num, (int, float)):\n return num\n return string",
"def extract_number(s):\n ns = re.findall(r'\\d+', s)\n if len(ns) == 0:\n return 0\n else:\n return int(ns[0])",
"def convert_money(val: str) -> int:\n if not val:\n return 0\n\n clean_up = MONEY_re.sub('', val)\n if not clean_up.isdigit():\n raise DataValidationFailed(f'Wrong money pattern: {val}')\n \n return int(clean_up)",
"def extract_number_from_str(input_string):\n return int(re.findall(r'\\d+', str(input_string))[-1])",
"def number(string):\n try:\n return int(string)\n except (ValueError, OverflowError):\n # Unclear on why sometimes it's overflow vs value error, but this should work.\n return long(string)",
"def amount_get(self, string, amount=True, rest=False):\n num_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n if amount and not rest:\n num_index = [i for i in range(len(string)) if string[i] in num_list]\n num = [string[i] for i in num_index]\n map(lambda x: str(x), num)\n num = \"\".join(num)\n return float(num)\n else:\n rest_index = [i for i in range(len(string)) if string[i] not in num_list]\n rest_string = \"\".join([string[i] for i in rest_index])\n return rest_string[1:] # bo pierwszy znak to spacja",
"def num(s):\n\ttry:\n\t\treturn int(s)\n\texcept:\n\t\treturn float(s)",
"def str_to_num(value):\r\n if isinstance(value, numbers.Number):\r\n return value\r\n try:\r\n return int(value)\r\n except ValueError:\r\n return float(value)",
"def _number(s):\n try:\n n = int(s)\n return n\n except ValueError:\n pass\n try:\n n = float(s)\n return n\n except ValueError:\n raise GlifLibError(\"Could not convert %s to an int or float.\" % s)",
"def convert(rates, value, from_string, to_string):\n rate = conversion_control_structure(rates, from_string, to_string)\n if rate is None:\n pass\n else:\n return round((rate * value), 2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
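
A runnable usage sketch for the `parse_param_string` record above. The record relies on a module-level `multiplicators` mapping that is not shown, so the dict below is an assumed stand-in; the regex is copied from the record.

import re

# Assumed stand-in for the record's unshown `multiplicators` mapping.
multiplicators = {"": 1, "thousand": 1e3, "million": 1e6, "billion": 1e9, "trillion": 1e12}

def parse_param_string(parameter_string):
    parameter_string = parameter_string.replace("\xa0", " ")
    match = re.search(
        r"(?:US\$)"                          # currency prefix
        r"(?P<value>[+−-]?\d+(?:\.\d+)?)"    # signed value; minus may be − or -
        r"(?P<multiplicator>\s[a-zA-Z]*)?",  # optional " billion" / " million" suffix
        parameter_string,
    )
    if not match:
        raise ValueError("Parameter is not money amount")
    value, multiplicator = match.groups(default="")
    value = value.replace("−", "-")
    multiplicator = multiplicator[1:]        # drop the leading space
    if multiplicator not in multiplicators:
        raise ValueError("Bad multiplicator")
    return float(value) * multiplicators[multiplicator]

print(parse_param_string("US$274.5\xa0billion"))  # 274500000000.0
print(parse_param_string("US$99"))                # 99.0
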
Get company name from summary table. | def scrape_company_name(html: str) -> str:
page = bs.BeautifulSoup(html, 'html.parser')
summary_table = page.find('table', # find one summary table
class_='infobox vcard')
if not summary_table:
raise ValueError('No summary table found')
try:
caption = summary_table.find('caption').text
except AttributeError:
raise ValueError('Caption of summary table not found')
if not caption:
raise ValueError('Caption of summary table not found')
    return caption | [
"def get_company_name(self):\r\n return self.company_name",
"def get_company_name(url_str):\n scraper = get_scraper(url_str)\n\n h1 = scraper.find_all(\"h1\", {\"id\": \"cn-cont\"})[0]\n return h1.contents[0].strip()",
"def get_CompanyName(TargetID):\n pass",
"def get_company_summary(parsed_html):\n\n keys = []\n values = []\n for dt, dd in zip(parsed_html.find_all(\"dt\"), parsed_html.find_all(\"dd\")):\n # Take out first token (number followed by a period)\n key = \" \".join(dt.text.strip().split()[1:])\n value = dd.text.strip()\n if \"Title of Each Class\\n\" in value:\n stock_inventory_df = parse_stock_inventory(value)\n keys += stock_inventory_df.iloc[:, 0].values.tolist()\n values += stock_inventory_df.iloc[:, 1].values.tolist()\n else:\n keys.append(key)\n values.append(value)\n\n company_summary_df = pd.DataFrame()\n company_summary_df[\"key\"] = keys\n company_summary_df[\"value\"] = values\n return company_summary_df",
"def test_get_company_name(self):\n\n symbol = 'AAPL'\n name = stock_info.get_company_name(symbol)\n self.assertEqual(name, 'Apple Inc.')",
"def get_company_info():\n return _get(\"info\")",
"def cohort_name_in_header(self):\n return self._cohort_name(self.q(css=self._bounded_selector(\".group-header-title .title-value\")).text[0])",
"def find_name(self, domain):\n # get company information from a valid domain\n self.browser.get(f\"https://{domain}\")\n title = self.browser.title if self.browser.title != None else \"No Company Info\"\n\n return title",
"def indeed_company(bsoup):\n company = []\n for div in bsoup.find_all(name=\"div\", attrs={\"class\": \"row\"}):\n try:\n company.append(div.find(\"span\", attrs={\"class\": \"company\"}).text)\n except:\n company.append(\"Nothing_found\")\n return company",
"def get_client_name(self):\n client_name_mask = self._fuzzy_match_series(self.closeout_df['item'], 'name', errors=1)\n name = self.closeout_df[client_name_mask]['item'].values[0]\n name = name.replace('name', '').strip()\n return name",
"def scrape_company_info(driver, site, company_name):\n source = get_page_source(driver, site, company_name)\n\n soup = BeautifulSoup(source, \"html.parser\")\n\n company_curr = Company(soup)\n\n name = company_curr.get_name()\n desc = company_curr.get_desc()\n location = company_curr.get_location()\n size = company_curr.get_size()\n url = company_curr.get_url()\n domain = company_curr.get_domains()\n \n return name, desc, location, size, url, domain",
"def _fetch_tw_name(self):\n country_tw = self.env.ref('base.tw')\n if not self.is_company:\n return\n if self.name:\n return\n if not self.vat:\n return\n if self.country_id != country_tw:\n return\n url = 'https://data.gcis.nat.gov.tw/od/data/api/9D17AE0D-09B5-4732-A8F4-81ADED04B679?$format=json&$filter=Business_Accounting_NO eq %s&$skip=0&$top=1' % self.vat\n try:\n response = requests.get(url)\n res_data = response.json()\n except Exception as e:\n raise UserError(e)\n name = res_data[0].get('Company_Name', '')\n self.name = name",
"def get_company_by_name(c, name):\n c.execute(\"SELECT * FROM companies WHERE name=:name\", {'name': name})\n obj = c.fetchone()\n return obj",
"def get_contact_name(course_code):\n\n ans = DatabaseConnector.get_values(\"SELECT contact_name, course_name FROM course WHERE \"\n \"course.course_code = \\\"\" + course_code + \"\\\";\")\n contact_name = ans[0][0]\n name = ans[0][1]\n return \"The name of the contact person in \" + course_code + \" \" + name + \" is \" + contact_name",
"def FirstName(self):\n return self.all_common_names[0]",
"def display_name_field(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name_field\")",
"def output_company(company):\n return {\n \"Name\": company.name,\n \"Address\": company.address\n }",
"def get_company_info(name, session):\n escaped_name = urllib.parse.quote_plus(name)\n\n response = session.get(('https://www.linkedin.com'\n '/voyager/api/organization/companies?'\n 'q=universalName&universalName=' + escaped_name))\n\n if response.status_code == 404:\n print(\"[!] Could not find that company name. Please double-check LinkedIn and try again.\")\n sys.exit()\n\n if response.status_code != 200:\n print(\"[!] Unexpected HTTP response code when trying to get the company info:\")\n print(f\" {response.status_code}\")\n sys.exit()\n\n # Some geo regions are being fed a 'lite' version of LinkedIn mobile:\n # https://bit.ly/2vGcft0\n # The following bit is a temporary fix until I can figure out a\n # low-maintenance solution that is inclusive of these areas.\n if 'mwlite' in response.text:\n print(\"[!] You are being served the 'lite' version of\"\n \" LinkedIn (https://bit.ly/2vGcft0) that is not yet supported\"\n \" by this tool. Please try again using a VPN exiting from USA,\"\n \" EU, or Australia.\")\n print(\" A permanent fix is being researched. Sorry about that!\")\n sys.exit()\n\n try:\n response_json = json.loads(response.text)\n except json.decoder.JSONDecodeError:\n print(\"[!] Yikes! Could not decode JSON when getting company info! :(\")\n print(\"Here's the first 200 characters of the HTTP reply which may help in debugging:\\n\\n\")\n print(response.text[:200])\n sys.exit()\n\n company = response_json[\"elements\"][0]\n\n found_name = company.get('name', \"NOT FOUND\")\n found_desc = company.get('tagline', \"NOT FOUND\")\n found_staff = company['staffCount']\n found_website = company.get('companyPageUrl', \"NOT FOUND\")\n\n # We need the numerical id to search for employee info. This one requires some finessing\n # as it is a portion of a string inside the key.\n # Example: \"urn:li:company:1111111111\" - we need that 1111111111\n found_id = company['trackingInfo']['objectUrn'].split(':')[-1]\n\n print(\" Name: \" + found_name)\n print(\" ID: \" + found_id)\n print(\" Desc: \" + found_desc)\n print(\" Staff: \" + str(found_staff))\n print(\" URL: \" + found_website)\n print(f\"\\n[*] Hopefully that's the right {name}! If not, check LinkedIn and try again.\\n\")\n\n return (found_id, found_staff)",
"def get_author_fullname(self):\n\n try:\n # Find the author too.\n userlookup = self.transaction[0][\"authorPHID\"]\n who = dict(self.phab.phid.query(\n phids=[userlookup]))[userlookup][\"fullName\"]\n\n self.logger.debug('get_author_fullname: %s' % who)\n return who\n\n # If the object exists, no worries, let's just return a good response.\n except http.client.HTTPException:\n self.logger.info('get_author_fullname is None')\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert parameters to another currency. | def convert_currency(params: Dict[str, Number], exchange_rate: Dict[str, Number]) -> Dict[str, Number]:
converted_params = {name: round(value * exchange_rate['exchange_rate'], 2)
for name, value in params.items()}
return converted_params | [
"def convert(self, params):\r\n if self.rates is None:\r\n self.load_rates()\r\n\r\n try:\r\n if params['in_currency'] == 'USD':\r\n result = params['amount'] * self.rates[params['out_currency']]\r\n elif params['out_currency'] == 'USD':\r\n result = params['amount'] / self.rates[params['in_currency']]\r\n else:\r\n result = self.convert(\r\n {'amount': params['amount'] / self.rates[params['in_currency']],\r\n 'in_currency': 'USD',\r\n 'out_currency': params['out_currency']})\r\n except KeyError as error:\r\n self.vprint('Given currency is not supported by OER method and is skipped:'\r\n f' {error.args[0]}')\r\n raise ConversionError(type='unsupported')\r\n\r\n return result",
"def convert(self, params):\r\n self.params = {\r\n 'Amount': params['amount'],\r\n 'From': params['in_currency'],\r\n 'To': params['out_currency']\r\n }\r\n\r\n try:\r\n response = self.get_response()\r\n except Exception:\r\n raise ConversionError(type='xe_error')\r\n\r\n if self.check_response(response) is not False:\r\n return response['converted']\r\n else:\r\n self.vprint('Given input and/or output currency is not supported '\r\n 'by XE method and is skipped: '\r\n '{} and/or {}'\r\n .format(params['in_currency'], params['out_currency']))\r\n raise ConversionError(type='unsupported')",
"def convert(self,amount):\n result = {}\n i = 0\n for in_cur in self.input_currency:\n i +=1\n if len(self.input_currency) == 1:\n n = ''\n else:\n n = str(i)+'.'\n values = {}\n for out_cur in self.output_currency:\n if out_cur == 'all_currencies':\n values = cr().get_rates(in_cur)\n for k,v in values.items():\n values.update({k:round(v*amount,2)})\n # faster than one by one cr().convert()\n else:\n value = round(cr().convert(in_cur,out_cur,amount),2)\n values.update({out_cur:value})\n result[\"{0}input\".format(n)] = {\"amount\":amount,\"currency\":in_cur}\n result[\"{0}output\".format(n)] = values\n return(result)",
"def convert():\n amt = amount.get().strip()\n source_curr = cb_source.get().strip()\n target_curr = cb_target.get().strip()\n\n if len(amt) == 0:\n amt = \"1.00\"\n elif float(amt) == 0:\n amt = \"1.00\"\n\n amt = str(float(amt)) # removing leading zeroes\n amount.set(amt)\n\n if source_curr not in currency_codes or target_curr not in currency_codes:\n result.set(\"Select Both Currencies!\")\n label_result['fg'] = 'red'\n label_result['font'] = FONT\n button_convert.flash()\n return\n elif source_curr == target_curr:\n result.set(\"Same currencies selected!\")\n label_result['fg'] = 'red'\n label_result['font'] = FONT\n else:\n val = cc.convert(source_curr, target_curr, amt)\n result.set(f\"{amt} {source_curr} = {val} {target_curr}\")\n label_result['fg'] = 'darkblue'\n label_result['font'] = FONT + (\"bold\",)",
"def convert(self, amount, input_cur, output_cur=None):\n self.in_amount = amount\n self.in_currency = input_cur\n self.out_currency = output_cur\n if not all([self.in_amount, self.in_currency]):\n prine('Arguments [amount] and [input_cur] are both required to convert!')\n return None\n\n prinf('converting in=%s, out=%s', self.in_currency, self.out_currency)\n self.convert_to_ccode()\n prinf('converted in=%s, out=%s', self.in_code, self.out_code)\n\n if self.in_code == self.out_code:\n self.out_amount = self.in_amount\n else:\n rates = self.get_rates_from_sql(self.in_code, self.out_code)\n\n prinf('rate = %s', rates)\n if type(rates) is not dict:\n self.out_amount = round(self.in_amount * rates, self.out_digits)\n self.print_json()\n else:\n self.out_amount = None\n rates = {key: round(self.in_amount * value, self.out_digits) for key, value in rates.items()}\n self.print_json(rates_dict=rates)",
"def convert(amount, from_currency, to_currency):\n\tfrom_currency = from_currency.upper()\n\tto_currency = to_currency.upper()\n\tlink = \"https://open.er-api.com/v6/latest/USD\"\n\tfile_name = \"./.files/USD.json\"\n\t\n\ttry:\n\t\tjson_data = requests.get(link).json()\n\t\trates_data = json_data[\"rates\"]\n\texcept Exception:\n\t\twith open(file_name, \"r\") as jsf:\n\t\t\tdata = json.load(jsf)\n\t\t\trates_data = data[\"rates\"]\n\telse:\n\t\tif(is_data_outdataed(json_data)):\n\t\t\twrite_data_to_file(json_data)\n\t\t\twrite_last_update(json_data)\n\n\tif(from_currency == \"USD\"):\n\t\tres = amount * rates_data[to_currency]\n\telif (to_currency == \"USD\"):\n\t\tres = amount / rates_data[from_currency]\n\telse:\n\t\tres_1 = convert(amount, from_currency, \"USD\")\n\t\tres = convert(res_1, \"USD\", to_currency)\n\treturn round(res, 4)",
"def convert(self, amount):\n return self.compute(\n request.nereid_website.company.currency.id,\n amount,\n request.nereid_currency.id)",
"def convert(self, amount:int, base_currency:str, foreign_currency:str):\r\n currency_url = \"https://api.exchangeratesapi.io/latest?base=\" + base_currency\r\n result = None\r\n try:\r\n result = urllib.request.urlopen(currency_url)\r\n json_text = result.read().decode(encoding = \"utf-8\")\r\n\r\n converter = json.loads(json_text)\r\n return amount * converter[\"rates\"][foreign_currency]\r\n except:\r\n pass\r\n\r\n finally:\r\n if result != None:\r\n result.close()",
"def convert(rates, value, from_string, to_string):\n rate = conversion_control_structure(rates, from_string, to_string)\n if rate is None:\n pass\n else:\n return round((rate * value), 2)",
"def yenToDollars(yen):\n # complete the function ",
"def _map_non_formatted_money_to_version_with_currency(cost, resource, token):\n return '$%.3f' % cost",
"def convert(self, conversions):\r\n w = self.copy()\r\n if conversions is None:\r\n return w\r\n assert isinstance(conversions, list)\r\n for from_asset, to_asset, rate in conversions:\r\n if from_asset in w:\r\n if to_asset not in w:\r\n w[to_asset] = Decimal()\r\n w[to_asset] += w[from_asset] * rate\r\n del w[from_asset]\r\n return w",
"def convert_to(self, date: Date, amount: Currency) -> Currency:\n rate = self.rate_at_date(date)\n return amount * rate",
"def convert_to_ccode(self):\n self.in_code = self.csc.get_currency_code(self.in_currency)\n if self.in_code is None:\n # suggest the nearest textually?\n sys.exit(2)\n\n if self.out_currency:\n self.out_code = self.csc.get_currency_code(self.out_currency)\n if self.out_code is None:\n # suggest the nearest textually?\n sys.exit(3)",
"def get_currency_to(self):\n return str(self.gui.cmb_currency_to.currentText())",
"def test_convert_amount(self):\r\n\r\n init = 'USD'\r\n new_currency = 'USD'\r\n amount = 1\r\n curr = CurrencyRates()\r\n curr_conversion = curr.convert(init, new_currency, amount)\r\n self.assertNotEqual(curr_conversion, 2)\r\n self.assertEqual(curr_conversion, 1)",
"def exchange(src, dst, amt):\r\n assert iscurrency(src)\r\n assert iscurrency(dst)\r\n assert isinstance(float(amt), float) or isinstance(int(amt), int)\r\n assert not isinstance(amt, bool)\r\n\r\n # query the server\r\n x = introcs.urlread(\r\n 'https://ecpyfac.ecornell.com/python/currency/fixed?src=' +\r\n src + '&dst=' + dst + '&amt=' + str(amt) + '&key=' + APIKEY)\r\n\r\n y = get_dst(x)\r\n z = before_space(y)\r\n\r\n return float(z)",
"def base_currency(self) -> str:",
"def eurosToDollars(euros):\n # perform a calculation\n\n return dollars"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
See if the client wants json by inspecting the mimetypes | def wants_json(mimetypes):
best = mimetypes.best_match(['application/json', 'text/html'])
return best == 'application/json' | [
"def _json_request(self):\n return 'json' in self._get_content_type()",
"def test_media_types(sut: SystemUnderTest, uri, response):\n if (uri != '/redfish/v1/$metadata' and response.request.method != 'HEAD'\n and response.status_code in [requests.codes.OK,\n requests.codes.CREATED]):\n # Test Assertion.PROTO_JSON_ALL_RESOURCES\n result, msg = response_content_type_is_json(uri, response)\n sut.log(result, response.request.method, response.status_code, uri,\n Assertion.PROTO_JSON_ALL_RESOURCES, msg)\n\n # Test Assertion.PROTO_JSON_RFC\n result, msg = response_is_json(uri, response)\n sut.log(result, response.request.method, response.status_code, uri,\n Assertion.PROTO_JSON_RFC, msg)\n\n # Test Assertion.PROTO_JSON_ACCEPTED\n if response.request.body:\n if response.status_code in [requests.codes.OK, requests.codes.CREATED,\n requests.codes.NOT_ACCEPTABLE,\n requests.codes.UNSUPPORTED_MEDIA_TYPE]:\n result, msg = response_is_json(uri, response)\n sut.log(result, response.request.method, response.status_code,\n uri, Assertion.PROTO_JSON_ACCEPTED, msg)",
"def test_json_media(self):\n media = JSON_MEDIA()\n self.assertEqual(media.mime_type, \"text/json\")\n self.assertEqual(media.file_extension, \".json\")",
"def json(self):\n assert_have_json()\n if self.mimetype.endswith(('+json', '/json')):\n return jsonmod.loads(six.text_type(self.data, self.charset))",
"def _utf8_encoded_json(request):\r\n content_type = request.META.get('CONTENT_TYPE', '')\r\n parts = content_type.split(';')\r\n if (len(parts) != 2 or\r\n parts[0].strip().lower() != 'application/json' or\r\n parts[1].strip().lower() != 'charset=utf-8'):\r\n return False\r\n return True",
"def ifReplaceable():\n if headerFile.HEADER.get(\"content-type\") == None:\n headerFile.HEADER[\"content-type\"] = \"application/json\"\n else:\n notif.warning(\"priority json\")",
"def test_file_types(self):\n obj = self._request(\"test.html\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"text/html\", \"Content type must be text/html\")\n obj = self._request(\"test.js\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"application/javascript\",\n \"Content type must be application/javascript\")\n obj = self._request(\"test.css\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"text/css\", \"Content type must be text/css\")\n obj = self._request(\"test.png\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"image/png\", \"Content type must be image/png\")\n obj = self._request(\"test.jpg\")\n info = obj.info()\n t = info[\"Content-type\"]\n self.assertEqual(t, \"image/jpeg\", \"Content type must be image/jpeg\")",
"def check_post_content_type(self):\n content_type = self.request.headers.get(\"Content-Type\")\n if not content_type:\n # not specified, e.g. from a script\n return True\n\n # parse content type for application/json\n fields = content_type.lower().split(\";\")\n if not any(f.lstrip().startswith(\"application/json\") for f in fields):\n self.log.warning(f\"Not allowing POST with content-type: {content_type}\")\n return False\n\n return True",
"def negotiate_serializer(self, *args, **kwargs):\n serializers = getattr(self, \"SERIALIZERS\",\n current_app.config[\"TOYBOX_SERIALIZERS\"])\n\n if len(serializers) > 0:\n mime_type = request.accept_mimetypes.best_match(serializers.keys())\n if mime_type is None:\n raise werkzeug.exceptions.NotAcceptable()\n return mime_type, serializers[mime_type]\n else:\n raise werkzeug.exceptions.InternalServerError()",
"def is_json(response):\n\n for left, right in [(response.getcode(), 200),\n (response.info().getmaintype(), \"application\"),\n (response.info().getsubtype(), \"json\")]:\n if left != right:\n return False\n\n return True",
"def _has_json_file_ext(self):\n fstr = self.basename.lower()\n return (\n fstr.endswith(\".json\")\n or fstr.endswith(\".jsonl\")\n or fstr.endswith(\".json.gz\")\n or fstr.endswith(\".jsonl.gz\")\n )",
"def test_json_only(self):\n\n input_path = os.path.join(os.path.dirname(__file__),\n \"test_data\",\n \"things_kinds\")\n\n json_paths = list(i_walk_json_paths(input_path))\n self.assertEqual(2, len(json_paths))\n for path in json_paths:\n self.assertIn('.json', path)",
"def accepts_only_json_request(f):\n @functools.wraps(f)\n def decorated_function(*args, **kwargs):\n if not request.is_json:\n return status_406()\n return f(*args, **kwargs)\n return decorated_function",
"def valid_content_types() -> List[str]:",
"def accepts(request, media_type):\n accept = parse_accept_header(request.META.get(\"HTTP_ACCEPT\", \"\"))\n return media_type in [t for (t, p, q) in accept]",
"def accept_json(request):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n json.loads(request.data.decode())\n if request.json:\n return func(*args, **kwargs)\n except Exception:\n resp = make_response(json.dumps({'error': 'Invalid JSON'}), 422)\n resp.mimetype = 'application/json'\n return resp\n resp = make_response(json.dumps({'error': 'Unsupported media type'}), 415)\n resp.mimetype = 'application/json'\n return resp\n return wrapper\n return decorator",
"def test_accept(self):\r\n # A request without an Accept header should return JSON.\r\n headers = None\r\n response = self.app.get('/api/person/1', headers=headers)\r\n assert 200 == response.status_code\r\n assert 'Content-Type' in response.headers\r\n assert 'application/json' == response.headers['Content-Type']\r\n assert 1 == loads(response.data)['id']\r\n headers = dict(Accept='application/json')\r\n response = self.app.get('/api/person/1', headers=headers)\r\n assert 200 == response.status_code\r\n assert 'Content-Type' in response.headers\r\n assert 'application/json' == response.headers['Content-Type']\r\n assert 1 == loads(response.data)['id']\r\n # Check for accepting XML.\r\n # headers = dict(Accept='application/xml')\r\n # response = self.app.get('/api/person/1', headers=headers)\r\n # assert 200 == response.status_code\r\n # assert 'Content-Type' in response.headers\r\n # assert 'application/xml' == response.headers['Content-Type']\r\n # assert '<id>1</id>' in response.data\r",
"def isJson(f):\n return len(f) > 5 and f[-5:] == \".json\"",
"def validate_json(request):\n if not request.is_json:\n print(\"Warning! Bad content-type '{}' in payload\".format(request.content_type))\n raise UnsupportedMediaType\n try:\n json_payload = request.get_json()\n return json_payload\n except Exception as e:\n bad_request_error = BadRequest()\n bad_request_error.description = '{}'.format(e)\n raise bad_request_error"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Discover BIGIP in Device / ADC | def ltm_discover(self, config, devid):
iq = self.config['bigiq']
ip = config['bigip']
username = config['ip_username']
password = config['ip_password']
iq_username = config['iq_username']
iq_password = config['iq_password']
self.logger.info("Discover BIGIP {0} in Device".format(ip))
uri= 'https://' + iq + '/mgmt/cm/global/tasks/device-discovery'
link = 'https://localhost/mgmt/cm/system/machineid-resolver/{0}'.format(devid)
device_json = {'deviceReference': {"link": link}, 'moduleList': [{'module': 'adc_core'}], "status":"STARTED"}
result=0
response = requests.post(uri, data=str(device_json), auth=(iq_username, iq_password), verify=False)
json_str = response.json()
uri=json_str['selfLink'].replace('localhost', iq)
i=0
while True:
response = requests.get(uri, auth=(config['iq_username'], config['iq_password']), verify=False)
json_str = response.json()
if json_str['status'] == 'FINISHED':
result=1
break
elif json_str['status'] == 'FAILED':
result=0
break
else:
time.sleep(1)
i+=1
self.logger.info("Discovery Status = {0} expecting FINISHED. {1}".format(json_str['status'], i))
if result==1:
return True
else:
return False | [
"def discover_atag():\r\n # return format: [b'ONE xxxx-xxxx-xxxx_xx-xx-xxx-xxx (ST)',\r\n # ('xxx.xxx.x.x', xxxx)]\r\n # sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) # UDP\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n sock.settimeout(30)\r\n sock.bind((\"\", 11000))\r\n try:\r\n while True:\r\n result = sock.recvfrom(37)\r\n host_ip = result[1][0]\r\n device_id = result[0].decode().split()[1]\r\n return host_ip, device_id\r\n except socket.timeout:\r\n return False\r\n except Exception as err:\r\n raise RequestError(err)",
"def ping_devices():",
"async def async_discover_atag():\r\n # return format: [b'ONE xxxx-xxxx-xxxx_xx-xx-xxx-xxx (ST)',\r\n # ('xxx.xxx.x.x', xxxx)]\r\n trans, proto = await asyncio.get_event_loop().create_datagram_endpoint(\r\n Discovery, local_addr=(LOCALHOST, ATAG_UDP_PORT)\r\n )\r\n try:\r\n result = await asyncio.wait_for(proto.data, timeout=30)\r\n host_ip = result[1][0]\r\n device_id = result[0].decode().split()[1]\r\n trans.close()\r\n except asyncio.TimeoutError:\r\n trans.close()\r\n raise RequestError(\"Host discovery failed\")\r\n return host_ip, device_id",
"def discover(): \n global prefs\n if 'bridge_cache' in prefs:\n try:\n bridgeip = prefs['bridge_cache']['ip']\n reply = requests.get('http://%s/api/' % (bridgeip), timeout=3).json()\n if len(reply) > 0 and 'error' in reply[0] and reply[0]['error']['type'] == 4:\n # good bridge, use it\n return bridgeip\n except requests.exceptions.ConnectTimeout:\n # fallback to rendezvous point\n pass\n\n print(\"Discovering bridge...\")\n try:\n bridgeip = requests.get('https://www.meethue.com/api/nupnp').json()[0]['internalipaddress']\n prefs['bridge_cache'] = {'ip': bridgeip}\n return bridgeip\n except Exception as except_inst:\n print(\"Bridge discovery failed:\", except_inst)\n raise CliFatalError()",
"def discoverDLNA():\n socket.setdefaulttimeout(1)\n location_regex = re.compile(\"location:[ ]*(.+)\\r\\n\", re.IGNORECASE)\n servers = []\n\n for addr in interface_addresses():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)\n sock.bind((addr, 0))\n sock.sendto(DISCOVERY_MSG, ('239.255.255.250', 1900))\n\n while True:\n try:\n location_result = location_regex.search(sock.recv(1024).decode('utf-8'))\n servers.append({'location':location_result[1]})\n except socket.timeout:\n break\n sock.close()\n\n for location in servers:\n try:\n resp = requests.get(location['location'], timeout=2)\n try:\n xmlRoot = ElementTree.fromstring(resp.text)\n except:\n #Failed XML parsing\n continue\n\n location[\"name\"] = get_attribute(xmlRoot,\"./{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-upnp-org:device-1-0}friendlyName\")\n\n iconurl = xmlRoot.find(\".//*{urn:schemas-upnp-org:device-1-0}icon/{urn:schemas-upnp-org:device-1-0}url\")\n if iconurl is not None:\n location['image'] = parse.urljoin(location['location'], iconurl.text)\n\n # service = xmlRoot.find('.//*{urn:schemas-upnp-org:device-1-0}service[{urn:schemas-upnp-org:device-1-0}serviceType=\"urn:schemas-upnp-org:service:ContentDirectory:1\"]')\n # location[\"controlURL\"] = parse.urljoin(location['location'], service.find('./{urn:schemas-upnp-org:device-1-0}controlURL').text)\n # location[\"servicetype\"] = service.find('./{urn:schemas-upnp-org:device-1-0}serviceType').text\n\n services = xmlRoot.findall(\".//*{urn:schemas-upnp-org:device-1-0}serviceList/\")\n for service in services:\n serviceURL = parse.urljoin(location['location'], service.find('./{urn:schemas-upnp-org:device-1-0}SCPDURL').text)\n # read in the SCP XML\n resp = requests.get(serviceURL, timeout=2)\n try:\n serviceXML = ElementTree.fromstring(resp.text)\n except:\n #Failed to parse the response XML\n continue;\n\n actions = serviceXML.findall(\".//*{urn:schemas-upnp-org:service-1-0}action\")\n for action in actions:\n if action.find('./{urn:schemas-upnp-org:service-1-0}name').text == 'Browse':\n location[\"controlURL\"] = parse.urljoin(location['location'], service.find('./{urn:schemas-upnp-org:device-1-0}controlURL').text)\n location[\"servicetype\"] = service.find('./{urn:schemas-upnp-org:device-1-0}serviceType').text\n\n except requests.exceptions.ConnectionError:\n settings.logger.warning('[!] Could not load %s' % location)\n except requests.exceptions.ReadTimeout:\n settings.logger.warning('[!] Timeout reading from %s' % location)\n\n return servers",
"def scan(self):\n for addr in range(127):\n # Skip I2C addresses which are reserved.\n if addr <= 7 or addr >= 120:\n continue\n if self.ping(addr):\n self._log.debug('Detected device at address 0x{0:02x}.'.format(addr))",
"def detectDigiDevice(timeout=1):\n\n listenPort = 1181\n broadcastPort = 2362\n digiDiscoverPacket = \"DIGI\\x00\\x01\\x00\\x06\\xff\\xff\\xff\\xff\\xff\\xff\"\n\n # setup socket\n outsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n outsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n outsock.bind(('', listenPort))\n\n # send our discovery packet out over all interfaces\n try:\n from netifaces import interfaces, ifaddresses, AF_INET\n for ifaceName in interfaces():\n try:\n for i in ifaddresses(ifaceName)[AF_INET]:\n outsock.sendto(\n digiDiscoverPacket, (i['broadcast'], broadcastPort))\n except:\n pass\n except ImportError:\n outsock.sendto(digiDiscoverPacket, (\"255.255.255.255\", broadcastPort))\n\n responses = []\n\n # wait for a response\n try:\n # setup the timeout\n outsock.settimeout(timeout)\n # until the socket timeout is raised or CTRL+C\n while True:\n # wait for data\n data, addr = outsock.recvfrom(2048)\n\n # process data\n if not data.startswith('DIGI'):\n return None\n mac = \"%02X:%02X:%02X:%02X:%02X:%02X\" % (\n ord(data[10]), ord(data[11]), ord(data[12]),\n ord(data[13]), ord(data[14]), ord(data[15]))\n len = ord(data[35])\n desc = data[36:(36+len)]+\" \"\n len2 = ord(data[36+len+7])\n desc += data[36+len+8: 36+len+8+len2]\n\n responses.append((addr[0], mac, desc))\n except (socket.timeout, KeyboardInterrupt):\n pass\n return responses",
"def scan_net(sub_net):\n sub_net = str(sub_net)\n list_host = []\n str_nmap = subprocess.run([\"nmap\", \"-sP\", sub_net],capture_output=True)\n str_nmap = str_nmap.stdout.decode(\"utf-8\")\n arr_host = str_nmap.split(\"Nmap scan report for\")\n del arr_host[0]\n active_hosts = map(filter_address, arr_host)\n for host in active_hosts: \n list_host.append(host)\n return list_host",
"def test_udp_search(fauxmo_server: t.Callable) -> None:\n msg = b'MAN: \"ssdp:discover\"' + b\"ST: urn:Belkin:device:**\"\n addr = (\"239.255.255.250\", 1900)\n\n with fauxmo_server(\"tests/test_config.json\"):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n reuseport = getattr(socket, \"SO_REUSEPORT\", None)\n if reuseport:\n sock.setsockopt(socket.SOL_SOCKET, reuseport, 1)\n\n sock.sendto(msg, addr)\n data = sock.recv(4096)\n\n assert b\"LOCATION: http://\" in data\n assert b\"/setup.xml\" in data",
"def find_visa_connected():\n\n mgr = visa.ResourceManager()\n resources = mgr.list_resources()\n print('Found VISA devices: ')\n for d in resources:\n if any([d.startswith(prefix) for prefix in INSTR_PREFIXES]):\n print(d)\n return resources",
"def findIlo(ilo_net):\n hp_servers = []\n nm = nmap.PortScanner()\n #scan net for ilo virtual media port is the key assumes that we don't override it in ilo config:q\n nm.scan(ilo_net,'17988','-PN') \n for h in nm.all_hosts():\n if nm[str(h)]['tcp'][17988]['state'] == 'open':\n # list of IP that have something looking like ILO :)\n #print 'SERVER %s -----------------' % str(h)\n #get damn server name aka sn\n try:\n conn = httplib.HTTPSConnection(str(h), timeout=5)\n except:\n print \"Can't connect to %s skip\" % str(h)\n continue\n try:\n conn.request(\"GET\", \"/xmldata?item=all\")\n response = conn.getresponse()\n except:\n print \"can't get response from %s\" % str(h)\n conn.close()\n continue\n \n data = response.read()\n a = re.findall('<SBSN>(.*)</SBSN>', data)\n conn.close\n if a:\n server_sn = a.pop().rstrip()\n print \"Found server %s with ILO module\" % server_sn\n hp_serv = HpServer(server_sn,'osmp.ru',str(h))\n hp_servers.append(hp_serv)\n\n #if list_all == 1:\n # print \"IP: %s Serial: %s Model: %s ILO FW: %s ILO Model: %s\" % (str(h), server_sn, model, fw_ver, ilo_model)\n return hp_servers",
"def getBulbs():\n\n # Initialize bulb scanner\n s = BulbScanner()\n\n # Scan the local network with timeout of 1\n result = s.scan(timeout=1)\n\n # Return just the IPs\n ips = []\n for i in result:\n ips.append(i['ipaddr'])\n\n return ips",
"def gather_chassis_details(self):",
"def find_devices(controller):\n pysicl.gpib_timeout(500)\n for addr in range(1,31):\n print addr\n if addr != 21:\n status = dev_status(controller+str(addr))\n print addr,status\n if status > -1:\n print addr,\":\",status\n pysicl.gpib_timeout(10000)",
"async def _discover_resources(call_obj, ip):\n context = 'coap://' + ip\n protocol = await Context.create_client_context()\n uri = context + \"/.well-known/core\"\n #print (uri)\n\n request = Message(code=GET, uri=uri)\n\n try:\n response = await protocol.request(request).response\n except Exception as e:\n print(\"Failed to discover resources on {0}\".format(uri))\n print(e)\n exit(-1)\n #pdb.set_trace()\n payload = str(response.payload)[2:-1]\n links = lh.parse(payload).links\n # Sorry, couldn't find a better way\n links = [(link.rt[0], link.get_target(context)) for link in links if 'rt' in link]\n res = {link[0]: link[1] for link in links}\n #print(res)\n #pdb.set_trace()\n if call_obj:\n call_obj.resourceDict = res\n return res",
"def is_lcd_reachable():\n\n response = requests.get(NODE_INFO_ENDPOINT)\n return True if response.status_code == 200 else False",
"def findNatsubNetwork():\n ipsubnet = \"192.168.\"\n i = 10\n while True:\n cmdstatus, cmdoutput = commands.getstatusoutput(\"/sbin/ifconfig -a | /bin/grep -w inet | /bin/awk -F' ' '{print $2}' | grep '%s%s' \" % (ipsubnet.replace('.', '\\.'), str(i) + '\\.'))\n if cmdstatus:\n break\n else:\n i += 2\n return [ipsubnet + str(i) + sub for sub in [\".1\", \".2\", \".254\" ]]",
"def get_magnum_service_by_host_and_binary(self, host, binary):",
"def main(dest_name):\n\t\n\tsource_addr = gethostIP()\n\tdest_addr = socket.gethostbyname(dest_name)\n\tport = 33434\n\tttl = 16\n\tmax_hops = 0\n\tmin_hops = 0\n\ttarget_hops = 0\n\tRTT = 0\n\tfound = False\n\tprint \"Source: %s\" % (source_addr)\n\tprint \"Destination: %s\" % (dest_addr)\n\n\twhile True:\n\t\tif not found: #look for it\n\t\t\tif ttl == 256:\n\t\t\t\tttl -= 1\n\t\t\telif ttl > 255:\n\t\t\t\tprint \"Maximum TTL reached. IP not found. Exiting.\"\n\t\t\t\tquit()\n\t\t\tprint \"Searching with ttl of %i.\" % (ttl)\n\n\t\t\tcurr_addr, _, __ = connect(ttl, port, dest_name)\n\n\t\t\t#If target found, begin binary search\n\t\t\tif curr_addr == dest_addr:\n\t\t\t\tmax_hops = ttl\n\t\t\t\tmin_hops = ttl/2\n\t\t\t\tprint \"Initial server found with ttl = %i\" % (ttl)\n\t\t\t\tprint \"Beginning Binary search of ttls from %i to %i\\n\" % (min_hops, max_hops)\n\t\t\t\tfound = True\n\t\t\telse:\n\t\t\t\tttl *= 2\n\t\t\t\tprint \"Server not found.\"\n\t\telse: #Now start binary searching\n\t\t\tcurr_addr, RTT, curr_name = connect((max_hops+min_hops)/2, port, dest_name)\n\n\t\t\t# print data of individual probe in format of TTL|Name|IP|RTT\n\t\t\tif curr_addr is not None:\n\t\t\t\tcurr_host = \"%s (%s) %fms\" % (curr_name, curr_addr, RTT)\n\t\t\telse:\n\t\t\t\tcurr_host = \"*\"\n\t\t\tprint \"%d\\t%s\" % ((min_hops+max_hops)/2, curr_host)\n\n\t\t\tif curr_addr == dest_addr: #You found it in the range. Check lower\n\t\t\t\tmax_hops = (min_hops+max_hops)/2\n\t\t\t\tprint \"Found server-Checking ttl from %i to %i.\" % (min_hops, max_hops)\n\t\t\telse: #Not in range. Check higher.\n\t\t\t\tmin_hops = (min_hops+max_hops)/2\n\t\t\t\tprint \"Server not found-Checking ttl from %i to %i.\" % (min_hops, max_hops)\n\n\t\t\t# break if search over\n\t\t\tif min_hops+1 == max_hops: #Binary search over. Now return \n\t\t\t\tprint_results(RTT, max_hops, source_addr, dest_addr)\n\t\t\t\tbreak"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call addition on MC | def addition(arg1, arg2):
return materials_commons.add(arg1, arg2) | [
"def add(arg1, arg2):\n\treturn arg1 + arg2",
"def addThem(n, m):\n\treturn n + m",
"def multiplyAndAdd(n, m):\n\treturn (n + 5) * (m + 7)",
"def __add__(self, *args):\n return _snap.TCnComV___add__(self, *args)",
"def add(num1, num2, num3):\n\treturn num1 + num2 + num3",
"def __add__(self, matrix):",
"def add(self, matrix):",
"def add_powder(self):",
"def addition(self, a, b, c):\n self.registers[a] = (self.registers[b] + self.registers[c]) % self.max_value",
"def myAdd(num1, num2):\n \n arg_sum = num1 + num2\n return arg_sum\n # This could also be done with: return num1 + num2",
"def example_add(val1, val2):\n print(f'Task add started')\n return val1 + val2",
"def __iadd__(self, other):\n if isinstance(other, MCCD):\n for ccd,occd in zip(self._data,other._data):\n ccd += occd\n else:\n for ccd in self._data:\n ccd += other\n return self",
"def __add__(self, mat):\n ret_mat = Mat3()\n for i in range(0,8):\n ret_mat[i] = self.data[i] + mat[i]\n return ret_mat",
"def __call__(self, t):\n return self.a(t) + self.b(t)",
"def tap_add():\n temp_add = answer.get()\n temp_add += \"+\"\n answer.set(temp_add)",
"def add_mult(num1, num2, num3):\n sum1 = num1 + num2\n return sum1 * num3",
"def compute_addtion(value):\n answer = value + value\n return answer",
"def addmm(self, beta=1, alpha=1, mat1, mat2): # real signature unknown; restored from __doc__\n pass",
"def add(listed):\n\n return sum(listed)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call subtraction on MC | def subtraction(arg1, arg2):
return materials_commons.sub(arg1, arg2) | [
"def substract():\n print(\"SUBTRACT\")\n a,b = getInput()\n print(a - b)",
"def subtract(minuend: int, subtrahend: int) -> int:\n click.echo(f\"{minuend} - {subtrahend} = {minuend - subtrahend}\")",
"def subtract(*arg):\n #first element will be the initial value of substraction, since we substruct from it\n subtraction = arg[0]\n #choosing the range from the second element till the end\n #this is what we are going to gradually substruct from the first element\n for i in arg[1:]:\n subtraction -= i\n return subtraction",
"def subtract(self):\n first_operand = self.pop()\n second_operand = self.pop()\n self.__cpu.memory.stack.push(ArithmeticLogicUnit.subtract(first_operand, second_operand))",
"def test_calculator_subtract(self):\n self.calculator = Calculator()\n self.calculator.subtract_number(1)\n assert self.calculator.get_result() == -1",
"def test_subtract():\n calc = Calculator(5)\n assert calc.subtract(6) == -1",
"def subtract(a, b):\n return b - a",
"def __call__(self, t):\n return self.a(t) - self.b(t)",
"def test_e_function_subtract_output(self):\n # Try to import before testing\n try:\n import lab3c as lab3cStudent\n except:\n self.fail('lab3c.py contains errors(HINT: run the function and fix errors')\n error_output = 'problem subtracting(HINT: operate(10, 5, \\'subtract\\')'\n self.assertEqual(str(lab3cStudent.operate(10, 5, 'subtract')), '5', msg=error_output)",
"def subtract(self,mat,tosubtract) :\n density1 = self._get_mat(mat)\n density2 = tosubtract._get_mat(mat)\n self.dg = density1 - density2",
"def subtract(a, b):\n print(str(a) + \" - \" + str(b) + \" = \" + str(a - b))",
"def testSubtractionOfSelf(self):\n self.time_sub(['10:10:10','10:10:10','00:00:00'])",
"def subtraction(self, term):\n\n self.complex_num -= term\n self.grade_exponential()",
"def __sub__(self, other):\n tccd = []\n if isinstance(other, MCCD):\n for ccd,occd in zip(self._data,other._data):\n tccd.append(ccd - occd)\n else:\n for ccd in self._data:\n tccd.append(ccd - other)\n return MCCD(tccd, self.head)",
"def subtract(value, arg):\n return int(value) - int(arg)",
"def __sub__(self, mat):\n ret_mat = Mat3()\n for i in range(0,8):\n ret_mat[i] = self.data[i] - mat[i]\n return ret_mat",
"def opcode_subtract_from_second_register(self, opcode: bytes) -> None:\n # Get the necessary information from the opcode\n first_register = self.get_lower_nibble(opcode[0])\n second_register = self.get_upper_nibble(opcode[1])\n first_register_value = self.registers[first_register]\n second_register_value = self.registers[second_register]\n result, not_borrow = self.bounded_subtract(second_register_value, first_register_value)\n\n # Perform the instruction\n self.registers[first_register] = result\n self.registers[15] = not_borrow\n logger.debug(f\"Execute Opcode {opcode.hex()}: Set the value of register {first_register} to the difference of the value of register {second_register} and itself ({second_register_value} - {first_register_value} = {result}, not borrow = {not_borrow}).\")",
"def cDiff():\n R=\"\"\"\n KERNEL void cDiff( GLOBAL_MEM const int *order2,\n GLOBAL_MEM const float2 *indata,\n GLOBAL_MEM float2 *outdata)\n {\n const unsigned int gid = get_global_id(0); \n const unsigned int ind = order2[gid];\n outdata[gid]=indata[ind]- indata[gid];\n };\n \"\"\"\n return R",
"def subtract_units(unit1,unit2):\n # check that units are compatible between unit/unit2\n unit_type = _check_unit_compatibility(unit1,unit2)\n\n # get unit dictionary\n unit_dict = _unit_dict(unit_type)\n unit1_val = _standard(unit1,unit_dict)\n unit2_val = _standard(unit2,unit_dict)\n\n # subtract using unit conversion\n unit_val = unit1_val - unit2_val\n\n # package and return string unit\n return _output([unit_val,unit_type],unit_dict)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PubNub Status Callback Implementation | def status(self, pubnub, status):
if utils.is_subscribed_event(status) and not self.connected_event.is_set():
self.connected_event.set()
self.disconnected_event.clear()
elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
self.disconnected_event.set()
self.connected_event.clear()
elif status.is_error():
_LOGGER.error('Error in Snoo PubNub Listener of Category: %s', status.category) | [
"def presence(self, pubnub, presence):",
"def status_callback(self, response):\n print response",
"def on_publish(self, client, userdata, retcode):\n self.log.debug(\n \"on_publish: Successfully published message %s, %s, %s\",\n client,\n userdata,\n retcode,\n )",
"def handle_getStatus_event():\n global STATUS_VALS\n socketio.emit('updateStatus', str(STATUS_VALS), callback=messageReceived)",
"def monitor_cb(ud, msg):\r\n return False",
"def comm_status(self):\r\n # TODO Note this has a lot of repeated code from forward UCM method. consider refactoring\r\n mesdict = {\"commstate\": \"good\"}\r\n\r\n page = self.URLmap.get(\"comm_state\", \"/comm.cgi?\")\r\n requestURL = \"http://\" + self.UCMip + page\r\n UCMrequest = urllib2.Request(requestURL)\r\n \r\n method = self.HTTPmethods.get(\"comm_state\", \"POST\")\r\n messtr = json.dumps(mesdict)\r\n UCMrequest.add_data(messtr)\r\n UCMresponsedict = {\"message_subject\": \"commstate_update\"}\r\n \r\n now = datetime.utcnow().isoformat() + 'Z'\r\n if settings.DEBUGGING_LEVEL >= 2:\r\n print(\"Sending a message to test connection at {time}\".format(time = now))\r\n topic = self.create_topic(\"commstate\")\r\n try:\r\n result = urllib2.urlopen(UCMrequest, timeout = 10)\r\n HTTPcode = result.getcode()\r\n if HTTPcode == 200:\r\n UCMresponsedict[\"commstate\"] = \"good\"\r\n elif HTTPcode == 400:\r\n UCMresponsedict[\"commstate\"] = \"SGD_timeout\"\r\n else:\r\n UCMresponsedict[\"commstate\"] = \"ambiguous\"\r\n\r\n print(\"<{name}> channel status update from {time}: {status}\".format(name =self.UCMname, time = now, status = UCMresponsedict[\"commstate\"]))\r\n notification = json.dumps(UCMresponsedict)\r\n self.vip.pubsub.publish(peer = 'pubsub', topic = topic, headers = {}, message = notification)\r\n except urllib2.URLError, e:\r\n print('an urllib2 error of type {error} occurred while sending comms test message to {ucm}'.format(error = e, ucm = self.UCMname))\r\n _log.error('Comm_state urllib error')\r\n except socket.timeout, e:\r\n _log.error('Comm_state time out')",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message - ACK received')",
"def public_mmsstatuscb():\n message_sid = request.values.get('MessageSid')\n message_status = request.values.get('MessageStatus')\n logging.info('Message status: %s / %s', message_sid, message_status)\n logging.debug('Message status DUMP: %s', request.values)\n if (message_status == 'failed' or message_status == 'undelivered'):\n logging.error('Message with SID %s has unacceptable status: %s', message_sid, message_status)\n return ('', 204)",
"def test_statusesChangedOnStatusMessage(self):\n q = []\n dispatcher = self.dispatcher\n dispatcher.statusWatcher = Watcher(q)\n message = \"whatever\"\n # Need to have a socket that will accept the descriptors.\n dispatcher.addSocket()\n subskt = dispatcher._subprocessSockets[0]\n dispatcher.statusMessage(subskt, message)\n dispatcher.statusMessage(subskt, message)\n self.assertEquals(q, [[-1], [-2]])",
"def Status(con, category, message):",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n self.print_debug('Published message acked.')",
"def test_notify_run_status(self):\n pass",
"def status(data):\n try:\n onlineUser = db_function.get_online_users('Y')\n # print(onlineUser)\n emit(\"announce status\", {\"onlineUser\": onlineUser}, broadcast=True)\n except Exception:\n print(\"Error occurred while broadcasting online users.\", sys.exc_info()[0])",
"def _callback(msg):\n print('subscription message data: ', msg.data.decode('utf-8'))\n if msg.attributes:\n print('subscription message attributes:\\n')\n pprint(msg.attributes)\n msg.ack()",
"def currently_publishing():\n c = CurrentlyPublishingStatus()\n c.update_status()",
"def handle_refreshStatus_event():\n receiveStatus()",
"def send_nak_response():\n pub.sendMessage(NAK_FIRST_TOPIC)",
"def send_unsigned_presence(self):\n current_presence = self.core.get_status()\n self.core.command.status('%s %s' % (current_presence.show or 'available', current_presence.message or '',))",
"def _push_status(self):\n\n self.data['status'] = self._status\n event_manager.device_changed(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PubNub Message Callback Implementation | def message(self, pubnub, message):
self._callback(ActivityState.from_dict(message.message)) | [
"def message_callback(callback):\r\n @wraps(callback) #update attribute data for decorator\r\n def message_handler(client,userdata,message):\r\n #decode and log the message and return the payload values\r\n message.payload = message.payload.decode(\"utf-8\")\r\n logger.debug('Received Message: %s (%s)'%(message.payload,message.topic))\r\n return callback(client,userdata,message)\r\n return message_handler",
"def publicMessage(con, nick, message):",
"def _callback(msg):\n print('subscription message data: ', msg.data.decode('utf-8'))\n if msg.attributes:\n print('subscription message attributes:\\n')\n pprint(msg.attributes)\n msg.ack()",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message - ACK received')",
"def pub_callback(topic, payload, qos, retain):\n mqtt.async_publish(topic, payload, qos, retain)",
"def on_publish(self, client, userdata, retcode):\n self.log.debug(\n \"on_publish: Successfully published message %s, %s, %s\",\n client,\n userdata,\n retcode,\n )",
"def presence(self, pubnub, presence):",
"def on_publish(client, user_data, msg_id):\n logger.info(\"Message with msg id {} successfully published.\".format(msg_id))",
"def publish_message(self, message, queue):",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n self.print_debug('Published message acked.')",
"def channelMessageReceived(self, channel, message, subchannel):",
"def post_send_message(self, msg):\r\n pass",
"def test_pubsubhubbub_required_callback(self):\n topic = (\n \"hub.topic\",\n \"https://github.com/octocat/hello-world/events/push\",\n )\n body = [(\"hub.mode\", \"subscribe\"), topic, (\"hub.callback\", \"\")]\n data = {k[4:]: v for k, v in body}\n self.instance.pubsubhubbub(**data)\n assert self.session.post.called is False",
"def test_publish_message(self):\n pass",
"def on_publish(self, client, userdata, mid):\n if mid == self.message_mid:\n logger.debug('Message {} published to broker'.format(message))\n raise KeyboardInterrupt",
"def MessageHandlerMethod(**kwargs):\n data: dict = kwargs['data']\n bus: AbstractPikaBus = kwargs['bus']\n payload: dict = kwargs['payload']\n print(payload)\n if 'count' in payload:\n payload['count'] += 1\n # bus.Publish(payload, topic='myTopic')",
"def send_nak_response():\n pub.sendMessage(NAK_FIRST_TOPIC)",
"def test_pubsub(nsproxy, serializer, message):\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a0.bind('PUB', alias='pub', serializer=serializer)\n a1.connect(addr, handler=set_received)\n while not a1.get_attr('received'):\n a0.send('pub', message)\n time.sleep(0.1)\n assert a1.get_attr('received') == message",
"def on_message(client, userdata, msg):\n saveMqttData(msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PubNub Presence Callback Implementation | def presence(self, pubnub, presence): | [
"def _update_presence():\n # get presence info from Centrifugo\n cent_url = CENTRIFUGO_HOST+\":\"+str(CENTRIFUGO_PORT)\n client = Client(cent_url, SECRET_KEY, timeout=1)\n clients = client.presence(CHANNEL)\n if DEBUG:\n print \"Updating presence\"\n # post presence info\n total_users = 0\n anonymous_users = 0\n users = []\n for client in clients.keys():\n total_users += 1\n user = clients[client][\"user\"]\n if user == \"\":\n anonymous_users += 1\n if DEBUG and user != \"\":\n print \"Client: \"+str(user)\n if user not in users and user != \"\":\n users.append(user.encode('utf8'))\n if DEBUG:\n print str(anonymous_users)+\" anonymous users\"\n print \"Connected users:\"\n for user in users:\n print '- '+user\n # send presence info into a Centrifugo channel\n msg = \",\".join(users)+'/'+str(anonymous_users)\n if total_users > 0:\n broadcast(message=msg, event_class=\"__presence__\")\n return",
"def _presence_listener(self, event: Dict[str, Any], presence_update_id: int) -> None:\n if self._stop_event.ready():\n return\n\n user_id = event[\"sender\"]\n\n if event[\"type\"] != \"m.presence\" or user_id == self._user_id:\n return\n\n address = address_from_userid(user_id)\n\n # Not a user we've whitelisted, skip. This needs to be on the top of\n # the function so that we don't request they displayname of users that\n # are not important for the node. The presence is updated for every\n # user on the first sync, since every Raiden node is a member of a\n # broadcast room. This can result in thousands requests to the Matrix\n # server in the first sync which will lead to slow startup times and\n # presence problems.\n if address is None or not self.is_address_known(address):\n return\n\n user = self._user_from_id(user_id, event[\"content\"].get(\"displayname\"))\n\n if not user:\n return\n\n self._displayname_cache.warm_users([user])\n # If for any reason we cannot resolve the displayname, then there was a server error.\n # Any properly logged in user that joined a room, will have a displayname.\n # A reason for not resolving it could be rate limiting by the other server.\n if user.displayname is None:\n new_state = UserPresence.SERVER_ERROR\n self._set_user_presence(user_id, new_state, presence_update_id)\n return\n\n address = self._validate_userid_signature(user)\n if not address:\n return\n\n self.add_userid_for_address(address, user_id)\n\n new_state = UserPresence(event[\"content\"][\"presence\"])\n\n self._set_user_presence(user_id, new_state, presence_update_id)\n self._maybe_address_reachability_changed(address)",
"def status(self, pubnub, status):\n if utils.is_subscribed_event(status) and not self.connected_event.is_set():\n self.connected_event.set()\n self.disconnected_event.clear()\n elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():\n self.disconnected_event.set()\n self.connected_event.clear()\n elif status.is_error():\n _LOGGER.error('Error in Snoo PubNub Listener of Category: %s', status.category)",
"def publicMessage(con, nick, message):",
"def message(self, pubnub, message):\n self._callback(ActivityState.from_dict(message.message))",
"def on_publish(self, client, userdata, retcode):\n self.log.debug(\n \"on_publish: Successfully published message %s, %s, %s\",\n client,\n userdata,\n retcode,\n )",
"def uplink_receive(self, stanza):\n pass",
"def presence_processor(self, elem):\n # log.msg(\"presence_processor() called\")\n items = xpath.queryForNodes(PRESENCE_MUC_ITEM, elem)\n if items is None:\n return\n\n _room = jid.JID(elem[\"from\"]).user\n if _room not in self.rooms:\n botutil.email_error(\n f\"Got MUC presence from unknown room '{_room}'\",\n self,\n elem,\n )\n return\n _handle = jid.JID(elem[\"from\"]).resource\n statuses = xpath.queryForNodes(PRESENCE_MUC_STATUS, elem)\n muc_codes = []\n if statuses is not None:\n muc_codes = [status.getAttribute(\"code\") for status in statuses]\n\n for item in items:\n affiliation = item.getAttribute(\"affiliation\")\n _jid = item.getAttribute(\"jid\")\n role = item.getAttribute(\"role\")\n left = affiliation == \"none\" and role == \"none\"\n selfpres = \"110\" in muc_codes\n if selfpres:\n log.msg(f\"MUC '{_room}' self presence left: {left}\")\n self.rooms[_room][\"joined\"] = not left\n\n self.rooms[_room][\"occupants\"][_handle] = {\n \"jid\": _jid,\n \"affiliation\": affiliation,\n \"role\": role,\n }",
"def test_pubsubhubbub_required_callback(self):\n topic = (\n \"hub.topic\",\n \"https://github.com/octocat/hello-world/events/push\",\n )\n body = [(\"hub.mode\", \"subscribe\"), topic, (\"hub.callback\", \"\")]\n data = {k[4:]: v for k, v in body}\n self.instance.pubsubhubbub(**data)\n assert self.session.post.called is False",
"def status(data):\n try:\n onlineUser = db_function.get_online_users('Y')\n # print(onlineUser)\n emit(\"announce status\", {\"onlineUser\": onlineUser}, broadcast=True)\n except Exception:\n print(\"Error occurred while broadcasting online users.\", sys.exc_info()[0])",
"def wait_for_presences(self, pres):\n self.received.add(pres['from'].bare)\n if len(self.received) >= len(self.client_roster.keys()):\n self.presences_received.set()\n else:\n self.presences_received.clear()",
"def _callback(msg):\n print('subscription message data: ', msg.data.decode('utf-8'))\n if msg.attributes:\n print('subscription message attributes:\\n')\n pprint(msg.attributes)\n msg.ack()",
"def on_peer_connected(peer, peer_count):",
"def _filter_presence(event: Dict[str, Any], presence_update_id: int) -> None:\n sender_server = event[\"sender\"].split(\":\")[-1]\n receiver_server = urlparse(client_server_url).netloc\n main_client_server = urlparse(self._client.api.base_url).netloc\n other_clients_servers = {\n urlparse(server_url).netloc\n for server_url in self.server_url_to_listener_id\n if server_url != self._client.api.base_url\n }\n\n # if this comes from the main client's sync consume all presences of users\n # which do not have a client in other clients. If other client for user's\n # homeserver exists, presence will be consumed by other client's sync\n if receiver_server == main_client_server:\n if sender_server not in other_clients_servers:\n self._presence_listener(event, presence_update_id)\n\n elif sender_server == receiver_server:\n self._presence_listener(event, presence_update_id)",
"def send_unsigned_presence(self):\n current_presence = self.core.get_status()\n self.core.command.status('%s %s' % (current_presence.show or 'available', current_presence.message or '',))",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message - ACK received')",
"def unsubscribedReceived(self, presence):\n # This is just a confirmation. Don't respond.\n pass",
"def presence_message():\n message = {\n 'action': 'presence',\n 'time': time.ctime(),\n 'type': 'status',\n 'user': {\n 'account_name': client,\n 'status': status,\n }\n }\n return json.dumps(message)",
"def session_start(self, event):\n\n self.send_presence()\n # Most get_*/set_* methods from plugins use Iq stanzas, which\n # can generate IqError and IqTimeout exceptions\n # try:\n # roster = self.get_roster()\n # except IqError as err:\n # print(\"IqError\")\n # self.disconnect()\n # except IqTimeout:\n # print(\"Iq Timeout Error \")\n # self.disconnect()\n\n self.get_roster()\n # self.send_message(mto='182037_2486194@chat.hipchat.com',\n # mbody=\"I have been built to annoy you\",\n # mtype='chat')\n # print(\"Sent the message\")\n\n # self.disconnect(wait=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Async utility function that waits for subscription disconnect. | async def wait_for_disconnect(self):
if not self.disconnected_event.is_set():
await self.disconnected_event.wait() | [
"async def unsubscribe_and_await_disconnect(self):\n self.unsubscribe()\n await self._listener.wait_for_disconnect()",
"async def wait_until_endpoint_subscriptions_change(self) -> None:\n ...",
"async def test_signal_listener_disconnect(\n proxy: DBusInterfaceProxy, test_service: TestInterface\n):\n value = None\n\n async def callback(val: str):\n nonlocal value\n value = val\n\n assert proxy.is_connected\n proxy.dbus.on_test_signal(callback)\n\n test_service.test_signal(\"hello\")\n await test_service.ping()\n assert value == \"hello\"\n\n proxy.disconnect()\n test_service.test_signal(\"goodbye\")\n await test_service.ping()\n assert value == \"hello\"",
"async def wait_no_websocket(self):\n await self._websocket_queue.join()",
"async def wait_until_endpoint_subscribed_to(\n self, remote_endpoint: str, event: Type[BaseEvent]\n ) -> None:\n ...",
"async def test_disconnect_consumer(test_consumer):\n test_consumer._message_queue = asyncio.Queue()\n test_consumer._message_queue.put_nowait('message')\n test_consumer._message_queue.put_nowait('message')\n test_consumer._message_queue.put_nowait('message')\n await test_consumer._connection_error_callback(Exception())\n for i in range(3):\n message = await test_consumer.read()\n assert message == 'message'\n with pytest.raises(Exception):\n await test_consumer.read()",
"async def _async_cancel_and_unsubscribe(self) -> None:\n self.async_cancel_pull_messages()\n if self._pull_messages_task:\n self._pull_messages_task.cancel()\n await self._async_unsubscribe_pullpoint()",
"async def test_remove_output():\n src = create_channel()\n out1 = create_channel()\n out2 = create_channel()\n m = create_multiple(src)\n assert m.add_output(out1)\n assert m.add_output(out2)\n await asyncio.sleep(0.05)\n m.remove_output(out1)\n x = 'x'\n src.offer(x)\n value2 = await out2.take(timeout=0.05)\n assert value2 == x\n assert out1.empty()\n src.close()\n await asyncio.wait_for(out2.closed(), timeout=0.05)\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(out1.closed(), timeout=0.05)",
"async def wait_until_connections_change(self) -> None:\n ...",
"def _await_state(self, async_res, sub):\n\n # wait for the event:\n log.debug(\"%r: _await_state: _timeout=%s\", self._platform_id, self._timeout)\n async_res.get(timeout=self._timeout)\n\n self._destroy_event_subscriber(sub)",
"async def _async_unsubscribe_pullpoint(self) -> None:\n if not self._pullpoint_manager or self._pullpoint_manager.closed:\n return\n LOGGER.debug(\"%s: Unsubscribing from PullPoint\", self._name)\n try:\n await self._pullpoint_manager.shutdown()\n except UNSUBSCRIBE_ERRORS as err:\n LOGGER.debug(\n (\n \"%s: Failed to unsubscribe PullPoint subscription;\"\n \" This is normal if the device restarted: %s\"\n ),\n self._name,\n stringify_onvif_error(err),\n )\n self._pullpoint_manager = None",
"def test_disconnect(daq):\n logger.debug('test_disconnect')\n assert not daq.connected\n daq.connect()\n assert daq.connected\n daq.disconnect()\n assert not daq.connected",
"async def _wait_for_connect(self):\n await self._started.wait()",
"async def wait_until_endpoint_subscribed_to(\n self, remote_endpoint: str, event: Type[BaseEvent]\n ) -> None:\n async with self._remote_subscriptions_changed:\n while True:\n if self.is_endpoint_subscribed_to(remote_endpoint, event):\n return\n await self._remote_subscriptions_changed.wait()",
"async def wait_until_any_endpoint_subscribed_to(\n self, event: Type[BaseEvent]\n ) -> None:\n ...",
"async def async_stop_mqtt_client(event=None):\n mqtt_client_task.cancel()\n with suppress(asyncio.CancelledError):\n await mqtt_client_task",
"async def shutdown_connection(app, loop):\n app.redis_connection.close()\n await app.redis_connection.wait_closed()",
"async def call_worker_shutdown():\n await asyncio.sleep(5)\n await worker.shutdown()",
"async def call_worker_shutdown():\n await asyncio.sleep(5)\n await worker.shutdown()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unsubscribe from the Snoo Activity Channel and await disconnect | async def unsubscribe_and_await_disconnect(self):
self.unsubscribe()
await self._listener.wait_for_disconnect() | [
"def unsubscribe(channel: str) -> None:\n _get().unsubscribe(channel)",
"def unsubscribe(self) -> None:\n if self.cdata is None:\n return\n if self.asyncio_register and self.fd != -1:\n self.loop.remove_reader(self.fd)\n try:\n check_call(lib.sr_unsubscribe, self.cdata)\n finally:\n self.cdata = None\n for t in list(self.tasks.values()):\n t.cancel()\n self.tasks.clear()",
"def stopSubscription(self) -> None:\n ...",
"async def unsubscribe(\n cls,\n connection: Connection,\n *,\n screen_name: str,\n ) -> None:\n log.info('Unsubscribing from screen name %s', screen_name)\n await connection.execute(\n models.twitter_subscriptions\n .update()\n .where(models.twitter_subscriptions.c.screen_name.ilike(screen_name))\n .values(\n unsubscribed_at=datetime.datetime.utcnow(),\n latest_tweet_id=None,\n refreshed_latest_tweet_id_at=None,\n ),\n )",
"def _unsubscribe(self):\n if hasattr(self, '_subscribed') and self._subscribed:\n self._conn.unregisterInterface(self._iTag, self)\n self._subscribed = False",
"def unsubscribe(self, sender):\n ctx = ssl.create_default_context(cafile=certifi.where())\n try:\n url = self.sub_info[sender]['sub_url']\n response = urlfetch.get(url)\n print(response.status)\n except urlfetch.UrlfetchException:\n print('Unsubscribe timeout exceeded!')",
"def unsubscribe(self, stream, i):\n stream_filter = self.get_stream_filter(stream)\n self.send_command({\n 'action': 'UNSUBSCRIBE',\n 'stream_filter': stream_filter,\n })",
"async def stop(self):\n # pylint: disable=protected-access\n # Workaround until PR is accepted:\n # https://github.com/pubnub/python/pull/99\n # self._pubnub.stop()\n await self._pubnub._session.close()\n if self._pubnub._subscription_manager is not None:\n self._pubnub._subscription_manager.stop()",
"def stop_subscription(event):\n _LOGGER.info(\"Shutting down subscriptions.\")\n VERA_CONTROLLER.stop()",
"def unsubscribe(self, handle, callback=None):\r\n pass",
"async def unsubscribe(self, ctx):\n\n sub = publix.weekly_sub()\n user_list = database.r_subscribed_users(sub.name)\n\n user_list.remove(ctx.author.id) # remove id from list\n database.w_subscribed_users(sub.name, user_list, overwrite=True) # write new user list\n await ctx.send(f\"{ctx.author.name} is now unsubscribed from notifications for {sub.name}\")",
"async def _async_cancel_and_unsubscribe(self) -> None:\n self.async_cancel_pull_messages()\n if self._pull_messages_task:\n self._pull_messages_task.cancel()\n await self._async_unsubscribe_pullpoint()",
"def __UnsubscribeEvent(self, serviceUrl, sid):\n unsubscribeRequest(serviceUrl, sid)\n try:\n self.sidToSocket[sid].shutdown(socket.SHUT_RDWR)\n except OSError:\n pass\n self.sidToSocket[sid].close()\n self.sidToSocket.pop(sid)\n self.sidToTimer[sid].cancel()\n self.sidToTimer.pop(sid)",
"def unsubscribe(self, subreddit):\n return self.subscribe(subreddit, unsubscribe=True)",
"async def stop(self):\n\t\t\n\t\tif self._ws:\n\t\t\ttry:\n\t\t\t\tawait self._ws.close()\n\t\t\texcept:\n\t\t\t\tpass # errors not useful here\n\t\t\n\t\tif self._runtask:\n\t\t\tawait self._runtask",
"async def unregister(websocket):\n logger.info(\"One client disconnected\")\n USERS.remove(websocket)\n await send_state_data()",
"def stop_wemo(event):\n _LOGGER.info(\"Shutting down subscriptions.\")\n SUBSCRIPTION_REGISTRY.stop()",
"async def on_disconnect():\n print(f'{bot.user.name} has disconnected!')",
"def stop(self):\n self.client.disconnect()\n log.debug('Disconnected from MQTT broker')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
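The unsubscribe-and-wait pattern in this row relies on a listener object that can signal when the connection is gone. A minimal sketch of such a listener, built only on asyncio.Event; the class and method names are assumptions for illustration, not the Snoo client's actual internals:

import asyncio

class DisconnectListener:
    """Tracks connection state and lets callers await a clean disconnect."""

    def __init__(self):
        self._disconnected = asyncio.Event()

    def on_disconnect(self):
        # Called by the transport/SDK callback once the connection is gone.
        self._disconnected.set()

    async def wait_for_disconnect(self, timeout=10.0):
        # Resolves immediately if already disconnected, otherwise waits up to `timeout`.
        await asyncio.wait_for(self._disconnected.wait(), timeout)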
Publish a message to the Snoo control command channel | async def publish(self, message):
task = await self._pubnub.publish().channel(
self._controlcommand_channel).message(message).future()
return task | [
"def send_control(self, control):\n if not self.is_closed:\n with self._lock:\n self.client.channel_control_write(self.number, control)",
"def publish(self, channel, message):\r\n return self.execute_command(\"PUBLISH\", channel, message)",
"def publish(self, rosmsg):\n if self.ready:\n msg = Msg()\n msg.data_type = rosmsg._type\n buff = StringIO()\n rosmsg.serialize(buff)\n msg.data = buff.getvalue()\n msg.length = buff.len\n self.__send(msg)",
"def send_command(connection: 'Redis', worker_name: str, command: str, **kwargs):\n payload = {'command': command}\n if kwargs:\n payload.update(kwargs)\n connection.publish(PUBSUB_CHANNEL_TEMPLATE % worker_name, json.dumps(payload))",
"def publish_message(self, message, queue):",
"def publish(self, msg=None):\n if not self.message_queue:\n logger.error('publish message error due to message_queue is None')\n return\n message = json.dumps(msg)\n try:\n self.message_queue.send(message)\n except Exception as e:\n logger.error('publish ipc message to queue fail: {}'.format(e))",
"def on_publish(self, client, userdata, mid):\n if mid == self.message_mid:\n logger.debug('Message {} published to broker'.format(message))\n raise KeyboardInterrupt",
"def _send_message(self, target, data):\n self._r.publish(target, json.dumps({\n \"sender\": self.name,\n \"data\": data,\n }))\n print(\"Sent message to\", target)",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message - ACK received')",
"def __publish_connect_message(self):\n logger.debug(\"Connect successfull\")\n self.publish(\"Greetings from Discord Bot\", topic=self.bot_topic)",
"async def publish_start(self):\n return await self.publish({\n 'command': 'start_snoo'\n })",
"def console_publish(cluster_name, data):\n zmq_context = zmq.Context()\n zmq_socket = zmq_context.socket(zmq.PUSH)\n zmq_socket.connect(\"tcp://127.0.0.1:{port}\".format(port=CONSOLE_MONITOR_PORT_PUSH))\n if not data.has_key('timestamp'):\n data['timestamp'] = (datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).total_seconds()\n zmq_socket.send_string(\"console {cluster_name} {data}\".format(\n cluster_name=cluster_name, \n data=json.dumps(data)))",
"def do_topic(self, channel, topic):\n self.send(\n msg=':{server} TOPIC {channel} :{topic}',\n channel=channel,\n topic=topic,\n )",
"def send_message(self,message): #tested and documtented\n self.__verbose_output( \">>sending '%s' to spectrometer\"%(message), 2 )\n self.device.write( message + \"\\r\\n\" )\n time.sleep(CONST_TIME_MEDIUM) #short sleep time to prevent too many requests",
"async def notify(self, channel, data):\n if not self._red:\n raise Exception(\"redis not enabled\")\n self._log.debug(\"Redis outgoing, channel: %s, data: %s\", channel, data)\n return await self._red.pubsub.publish(channel, json.dumps(data))",
"def func(self):\r\n caller = self.caller\r\n channelkey, msg = self.args\r\n if not msg:\r\n caller.msg(\"Say what?\")\r\n return\r\n channel = ChannelDB.objects.get_channel(channelkey)\r\n if not channel:\r\n caller.msg(\"Channel '%s' not found.\" % channelkey)\r\n return\r\n if not channel.has_connection(caller):\r\n string = \"You are not connected to channel '%s'.\"\r\n caller.msg(string % channelkey)\r\n return\r\n if not channel.access(caller, 'send'):\r\n string = \"You are not permitted to send to channel '%s'.\"\r\n caller.msg(string % channelkey)\r\n return\r\n msg = \"[%s] %s: %s\" % (channel.key, caller.name, msg)\r\n msgobj = create.create_message(caller, msg, channels=[channel])\r\n channel.msg(msgobj)",
"def publish(self, channel, clientid, request):\n return True, \"\"",
"def sendScpi(self, scpiCmd):\n\n if not self._isConnected:\n raise OntRemoteError('No connection to ONT')\n\n self._ts.write( scpiCmd.encode('ascii') + self._eoc_encoded)",
"def Publish(client, obj, mid):\n\tprint(f\"mid: {mid} on publish\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
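The publish call above chains channel() and message() before awaiting future(), the fluent-builder style exposed by the PubNub SDK. A rough, self-contained sketch of how such a builder can be structured; this is a hypothetical class for illustration, not the SDK's implementation:

import asyncio

class PublishBuilder:
    """Collects publish parameters, then sends them when awaited."""

    def __init__(self, transport):
        self._transport = transport
        self._channel = None
        self._message = None

    def channel(self, name):
        self._channel = name
        return self  # returning self enables method chaining

    def message(self, payload):
        self._message = payload
        return self

    async def future(self):
        # Hand the collected parameters to the underlying transport.
        return await self._transport.send(self._channel, self._message)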
Publish a start_snoo command message to the Snoo control command channel | async def publish_start(self):
return await self.publish({
'command': 'start_snoo'
}) | [
"async def start_control(self) -> None:\n print('[Balancing] Starting sonos control loop')\n await self.sonos.control_loop()",
"def serviceStarted(self):\n self.openChannel(ShellSlaveChannel(conn=self))",
"def session_start(self, event):\n\n self.send_presence()\n # Most get_*/set_* methods from plugins use Iq stanzas, which\n # can generate IqError and IqTimeout exceptions\n # try:\n # roster = self.get_roster()\n # except IqError as err:\n # print(\"IqError\")\n # self.disconnect()\n # except IqTimeout:\n # print(\"Iq Timeout Error \")\n # self.disconnect()\n\n self.get_roster()\n # self.send_message(mto='182037_2486194@chat.hipchat.com',\n # mbody=\"I have been built to annoy you\",\n # mtype='chat')\n # print(\"Sent the message\")\n\n # self.disconnect(wait=True)",
"def run(self):\n\t\tself.client.loop_start()\n\t\tself.discover_and_notify()\n\t\tself.publish()",
"def start_mqtt():\n with app.app_context():\n sub = Subscriber()\n sub.subscribe()",
"def start_snmp(self) :\n cmd = IPNETNSEXEC % (self.hostname, STARTSNMP % (self.snmppid))\n runOS(cmd)",
"def start(self):\n try:\n self.is_connected = self.robot.connect()\n self.is_enabled = True\n rospy.loginfo(\"Connected to Sphero %s with address: %s\", self._namespace, self._address)\n # Set initial color of Sphero indicating connection.\n self.robot.set_rgb_led(self.connect_color_red, self.connect_color_green,\n self.connect_color_blue, 1, False)\n except Exception as e:\n rospy.logerr(\"Failed to connect to Sphero.\")\n print(\"\\033[1m\\033[31mPlease send this error message to marko.krizmancic@fer.hr:\\n\\n\"\n \"While trying to connect to Sphero, script exited with this error: \\n\"\n \"Type: {}\\n\"\n \"Message: {}\\n\\033[0m\".format(sys.exc_info()[0].__name__, e))\n sys.exit(1)\n\n if self._data_stream == 'All':\n # Setup all data streaming.\n self.robot.set_filtered_data_strm(self.sampling_divisor, 1, 0, False)\n self.robot.add_async_callback(chr(0x03), self.parse_data_strm)\n self.robot.set_locator(0, 0, 0, 0, True)\n rospy.loginfo(\"Data streaming of Imu data and Odometry is set\")\n elif self._data_stream == 'Locator':\n # Setup real-time location data stream.\n self.robot.set_locator(1, 0, 0, 0, True)\n rospy.loginfo(\"Locator data streaming is set\")\n\n # Setup power notification.\n self.robot.set_power_notify(True, True)\n self.robot.add_async_callback(chr(0x01), self.parse_power_notify)",
"def __create_ROS_publisher(self):\n import rospy\n \n from std_msgs.msg import String\n self.ros_pub = rospy.Publisher(self.topic, String, queue_size=10)\n\n rospy.init_node(self.node, anonymous = True)\n self.connected = True",
"def main():\n\trospy.init_node('command_request')\n\n\t# Publishers and subscribers\n\tpub = rospy.Publisher('/command', String, queue_size=10)\n\t\n\trate = rospy.Rate(10) # 10hz\n\n\twhile not rospy.is_shutdown():\n\n\t\t# Get command from user\n\t\ttxt = raw_input(\"Write play or stop to put or hide the ball in the environment: \\n\")\n\t\tif(txt == \"Play\" or txt == \"play\" or txt == \"PLAY\"):\n\t\t\ttxt = \"play\"\n\t\t\tpub.publish(txt)\n\t\t\n\t\telif(txt == \"Stop\" or txt == \"stop\" or txt == \"STOP\"):\n\t\t\ttxt = \"stop\"\n\t\t\tpub.publish(txt)\n\t\t# Code for invalid comand and retry\n\t\telse:\n\t\t\tprint(\"Your command '\" + txt + \"' is not valid.\")\n\t\t\tprint(\"Please write a valid command\")\n\t\t\tprint(\"\")\n\t\t\tcontinue\n\t\trate.sleep()\n\n\trospy.spin()",
"def start(self, irc, msg, args):\n irc.reply(\"Starting twitter monitoring.\")\n self._start(irc)",
"def start(self):\n if self.communicator_ is None:\n print('you must call init_with_ctx first to init comm before start')\n return\n self.communicator_.start()",
"def start_consuming(self):",
"def _connection_menu_start_pressed():\n msg = BaseMessage(mess_type=climess.MessageType.START_GAME_MANUALLY, target=climess.Target.GAME)\n Client.get_instance().send_important_message(msg)",
"def start(self):\n self.slackClient.start()",
"def _start_msg(message):\n # add chat to the list of chats for broadcast\n self._cids.add(message.chat.id)\n # send answer\n self._bot.send_message(message.chat.id, \"Hello there!\")",
"def start(self):\n log.info(\"Starting SCPI interface\")\n self._create_socket()\n self._stop_event.clear()\n self._flush()\n\n @coroutine\n def receiver():\n request = None\n try:\n message, addr = self._socket.recvfrom(self._buffer_size)\n log.info(\"Message received from {}: {}\".format(addr, message.strip()))\n except socket.error as error:\n error_id = error.args[0]\n if error_id == errno.EAGAIN or error_id == errno.EWOULDBLOCK:\n yield sleep(0.2)\n else:\n raise error\n except Exception as error:\n log.exception(\"Error while fetching SCPI message\")\n raise error\n else:\n request = ScpiRequest(message, addr, self._socket)\n finally:\n if not self._stop_event.is_set():\n self._ioloop.add_callback(receiver)\n if request:\n yield self._dispatch(request)\n self._ioloop.add_callback(receiver)",
"def start(self):\n self.action_server.start()",
"def start(update: Update, context: CallbackContext) -> int:\n tgUser = update.message.from_user\n logger.info(\"Starting /start command for User %s\", tgUser.username)\n reply_keyboard = [['Luxor']]\n update.message.reply_text(\n 'Welcome to the Mining Pool Monitor Bot.\\n'\n 'Use the /cancel command to stop me.\\n\\n'\n 'Please select your mining pool:',\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard, one_time_keyboard=True, input_field_placeholder='Mining Pool'\n ),\n )\n return SELECTMININGPOOL",
"def start(self):\n self.logger.info(\"Starting NLaunch Console \" +\n \"(port='{port}', pwd_path='{pwd_path}')\".format(\n port=self.port, pwd_path=self.pwd_path))\n reactor.listenTCP(self.port, NLaunchFactory(self.pwd_path))\n reactor.run()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stop and clean up the async PubNub utility | async def stop(self):
# pylint: disable=protected-access
# Workaround until PR is accepted:
# https://github.com/pubnub/python/pull/99
# self._pubnub.stop()
await self._pubnub._session.close()
if self._pubnub._subscription_manager is not None:
self._pubnub._subscription_manager.stop() | [
"def cleanup() -> None:\n\n global _broadcaster\n _broadcaster = None",
"async def cleanup(self) -> None:\n for pf in self._scheduled_functions.values():\n pf.stop()\n for t in self._in_stream.values():\n t.cancel()\n if self._cb_app_heartbeat:\n self._cb_app_heartbeat.stop()\n if self.name and self._red is not None:\n # Unregister app from redis.\n await self._red.hdel(REDIS_KEYS.APPS.value, self._app_rkey) # type: ignore\n if self._red is not None:\n await self._red.close()\n if self._red_sub is not None:\n await self._red_sub.close()",
"def stop(self):\n self._check(pn_messenger_stop(self._mng))",
"async def _async_unsubscribe_pullpoint(self) -> None:\n if not self._pullpoint_manager or self._pullpoint_manager.closed:\n return\n LOGGER.debug(\"%s: Unsubscribing from PullPoint\", self._name)\n try:\n await self._pullpoint_manager.shutdown()\n except UNSUBSCRIBE_ERRORS as err:\n LOGGER.debug(\n (\n \"%s: Failed to unsubscribe PullPoint subscription;\"\n \" This is normal if the device restarted: %s\"\n ),\n self._name,\n stringify_onvif_error(err),\n )\n self._pullpoint_manager = None",
"async def stop(self):\n\t\t\n\t\tif self._ws:\n\t\t\ttry:\n\t\t\t\tawait self._ws.close()\n\t\t\texcept:\n\t\t\t\tpass # errors not useful here\n\t\t\n\t\tif self._runtask:\n\t\t\tawait self._runtask",
"def stop(self):\n LOGGER.debug(\"Stopping Pokeman\")\n for name, connection in self.connections['sync'].items():\n if connection is not None:\n connection.close_all_channels()\n connection.disconnect()\n for name, connection in self.connections['async'].items():\n if connection is not None:\n connection.close_all_channels()\n connection.disconnect()\n self.cleanup()\n LOGGER.debug(\"Stopping Pokeman OK!\")",
"def tearDown(self):\n self.disp_pub.clear_output()\n self.socket.close()\n self.context.term()",
"def _on_stop(self):\n self._pool.join()",
"def __del__(self):\n self._output_stream.stop_stream()\n self._output_stream.close()\n self._py_audio.terminate()",
"def close(self):\n PubSubManager.remove_subscriber(self)",
"def stop(self):\n self.client.disconnect()\n log.debug('Disconnected from MQTT broker')",
"def stop(self):\n if self.run_matrx_api:\n if self.verbose:\n print(\"Shutting down Matrx api\")\n _ = requests.get(\"http://localhost:\" + str(api._port)\n + \"/shutdown_API\")\n self.api_info[\"api_thread\"].join()\n\n if self.run_matrx_visualizer:\n if self.verbose:\n print(\"Shutting down Matrx visualizer\")\n _ = requests.get(\"http://localhost:\"\n + str(visualization_server.port)\n + \"/shutdown_visualizer\")\n self.matrx_visualizer_thread.join()",
"def stop(self):\n self.subscriber.disconnect()\n self.file.close_file()",
"def shutdown(self):\n\n self.logger.info(\"Shutting down %s\"%__name__)\n\n try:\n self.logger.info(\"Closing websocket\")\n self.websocketClient.close()\n except Exception as e:\n self.logger.error(\"Websocket close error : %s \" %e)\n\n self.alive = False\n \n self.threadProcessQueue.join()\n\n time.sleep(1)\n self.exit = True",
"def _cleanup(self):\n self.amqp_channel.basic_cancel(TrapezeWSGI.CONSUMER_TAG)\n self.input_buffer.close()\n self.output_buffer.close()\n self.error_buffer.close()\n self.amqp_channel.close()\n self.amqp_connection.close()",
"def _cleanup(self):\n self._timer.cancel()\n if self.isRunning:\n self.isRunning = False\n self.tkRoot.tk.deletefilehandler(self.subProc.stdout)\n self.tkRoot.tk.deletefilehandler(self.subProc.stderr)\n outData, errData = self.subProc.communicate()\n if outData:\n self.logWdg.addOutput(outData)\n if errData:\n self.logWdg.addOutput(errData, severity=RO.Constants.sevError)",
"def _destroy_event_subscriber(self, sub):\n #self.remove_endpoint(sub) -- why this is making tests fail?\n # TODO determine whether self.remove_endpoint is the appropriate call\n # here and if so, how it should be used. For now, calling sub.close()\n # (this only change made the difference between successful tests and\n # failing tests that actually never exited -- I had to kill them).\n sub.close()",
"def __del__(self):\n _LOGGER.info('Shutting the co-process')\n self.process.terminate()",
"def __del__(self):\n\n if self.curveball_client:\n QE2LOG.info('Stopping curveball subprocess')\n self.curveball_client.terminate()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
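Several of the shutdown snippets in this row follow the same asyncio pattern: cancel the background task, swallow the resulting CancelledError, then close any network session. A generic sketch of that pattern under assumed names (task and session are placeholders):

import asyncio
from contextlib import suppress

async def shutdown(task, session):
    """Cancel a background task and close its network session cleanly."""
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task             # wait until the cancellation is actually processed
    if session is not None and not session.closed:
        await session.close()  # e.g. an aiohttp.ClientSession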
Return a tuple containing a flag that is True if the key could be deleted, along with the message to send back to the client. | def handle_delete(key):
try:
del DATABASE_DICT[key]
except KeyError:
operation_status = False
result_msg = 'ERROR: Key [{}] not found and could not be deleted'.format(key)
else:
operation_status = True
result_msg = "Key [{}] deleted".format(key)
return operation_status, result_msg | [
"def deleted_message(self):\n return isinstance(self.original.action,\n types.ChannelAdminLogEventActionDeleteMessage)",
"def isDeleted():",
"async def messagedelete(self, ctx):\n status = await self.bot.pool.fetch(\"SELECT * FROM loggingsettings WHERE guildid = $1\", ctx.guild.id)\n\n if status[0][\"message_delete\"] == True:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET message_delete = $1 WHERE guildid = $2\", False, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned off for deleting message.\")\n await ctx.send(embed=embed)\n return\n else:\n await self.bot.pool.execute(\"UPDATE loggingsettings SET message_delete = $1 WHERE guildid = $2\", True, ctx.guild.id)\n embed=discord.Embed(title=\"Done!\", color=discord.Color.blurple(), description=\"Logging has been turned on for deleting message.\")\n await ctx.send(embed=embed)",
"def test_delete_key(client):\n resp = client.delete_key(PROJECT_ID, 48855760)\n assert resp['project_id'] == PROJECT_ID\n assert resp['key_removed']",
"def encodeDeleteMessage(supercuboid_key, message_id, receipt_handle):\n return NotImplemented",
"def delete_recipient(self,key):\n response = self.recipient_resource.delete(params={'key':key})\n return response['deleted']",
"def test_015(self):\n\n HEADING()\n result = run(\"cm key delete testkey\")\n print (result)\n assert \"OK.\" in result",
"def test_delete_key_comment(client):\n response = client.delete_key_comment(PROJECT_ID, KEY_ID, 3838530)\n assert response['project_id'] == PROJECT_ID\n assert response['comment_deleted']",
"def decodeDeleteMessage(message):\n return NotImplemented",
"def test_key_deletion(self):\n pass",
"def test_api_v1_messages_message_id_delete(self):\n pass",
"def delete_from_redis(delete_key):\n r.delete('sample1', delete_key)\n print(r.hmget('sample1', delete_key))",
"def _add_delete_key_pb(self):\n\t\tnew_mutation = _datastore_pb2.Mutation()\n\t\tself._mutations.append(new_mutation)\n\t\treturn new_mutation.delete",
"def deletedRecord():",
"def email_delete(imap, uid):\n status, response = imap.store(str(uid), '+FLAGS', '\\Deleted')\n return status, response",
"def delete_message(self, msg_id):\r\n\r\n self.handle.dele(msg_id)",
"def test_pos_remove_with_policy_key_digest(self):\n\n key = (\"test\", \"demo\", None, bytearray(\"asd;as[d'as;djk;uyfl\", \"utf-8\"))\n meta = {\"gen\": 0}\n policy = {\"retry\": aerospike.POLICY_RETRY_ONCE, \"key\": aerospike.POLICY_KEY_DIGEST}\n retobj = self.as_connection.put(key, policy)\n\n assert retobj == 0\n\n retobj = self.as_connection.remove(key, meta, policy)\n\n assert retobj == 0\n\n with pytest.raises(e.RecordNotFound) as exception:\n (key, meta, _) = self.as_connection.get(key)\n\n (code, msg, _, _) = exception.value\n assert msg == \"AEROSPIKE_ERR_RECORD_NOT_FOUND\"\n assert code == 2",
"def delete(self, key, item): # noqa\n return self.execute_command(CF_DEL, key, item)",
"async def _kv_atomic_delete(self, *keys):\n raise NotImplementedError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
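A short usage sketch of handle_delete, showing how a caller might turn the returned tuple into a reply; the surrounding status-code logic is hypothetical, only DATABASE_DICT and the message format come from the snippet:

# Assuming DATABASE_DICT already holds the key 'alpha':
ok, msg = handle_delete('alpha')    # -> (True,  "Key [alpha] deleted")
ok, msg = handle_delete('missing')  # -> (False, "ERROR: Key [missing] not found and could not be deleted")

# A caller would typically send msg back and use ok to pick a status code:
status = 200 if ok else 404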
Helper function that pushes the text through the Django templating engine. Keep in mind that it does not escape the text, which is intentional. | def templatize(self, text, context):
return Template("{% autoescape off %}" + text + "{% endautoescape %}").render(context) | [
"def render_template(text, context=None):\n template = engines[\"django\"].from_string(text)\n if not context:\n context = {}\n return template.render(context)",
"def do_normal(self, string):\n _append = Appender()\n def append(string):\n _append(string)\n code_obj = compile(string, \"template-compile\", \"exec\")\n env = self.env.copy()\n env.update({\"append\":append})\n exec code_obj in env # exec other normal template statements in environ `self.env`\n return _append.get_value() # 返回 这个 块 生成的html",
"def mako_template(text):\n\n return mako_template_env.from_string(text)",
"def render_template(text, **context_args):\r\n template = Template(\"{% load bootstrap3 %}\" + text)\r\n if not 'form' in context_args:\r\n context_args['form'] = ExpenseFilterForm()\r\n return template.render(Context(context_args))",
"def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)",
"def render(self, user):\n self._render_text = self.content.replace('\\n', '<br>')\n return render_str(\"post.html\", p=self, user=user)",
"def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)",
"def render_markup(text):\n if flaskbb_config['MARKUP_TYPE'] == 'bbcode':\n return render_bbcode(text)\n elif flaskbb_config['MARKUP_TYPE'] == 'markdown':\n return render_markdown(text, extras=['tables'])\n return text",
"def render_template(text, **context_args):\n template = Template(\"{% load bootstrap3 %}\" + text)\n if not 'form' in context_args:\n context_args['form'] = AccountForm()\n return template.render(Context(context_args))",
"def _build_html(items, wrapping):\r\n return jinja2.Markup('\\n'.join((wrapping % item for item in items)))",
"def simple_render(template, context):\n\n def parse_token(token, in_tag):\n if not in_tag:\n return token\n var = token[2:-2].strip()\n return context.get(var, '')\n\n result = []\n in_tag = False\n\n for token in tag_re.split(template):\n if token:\n result.append(parse_token(token, in_tag))\n in_tag = not in_tag\n\n return ''.join(result)",
"def process_string(self, string):\n template = self.tpl_env.from_string(string)\n out = template.render(self.tpl_context)\n return out",
"def html_mail(tpl,type,title,subtitle,user,content,unsubscribeurl):\n #stores all info to be inserted in the template, \n #makes sure french accent are converted to their html equivalent\n infos = {'title': title,\n 'subtitle': subtitle,\n 'user': user,\n 'content': content,\n 'unsubscribeurl': unsubscribeurl,\n 'mailtype': type \n }\n \n return tpl % infos",
"def glue_template_and_params(template_and_params) -> str:\n template, params = template_and_params\n text = ''\n for items in params.items():\n text += '|{}={}\\n'.format(*items)\n\n return f'{{{{{template}\\n{text}}}}}'",
"def prettify_text(self, text):\n text = self.widont(text)\n text = smartypants.smartypants(text)\n return text",
"def ex_braces(content):\n return \"{{\" + content + \"}}\"",
"def test_in_text_template(self):\r\n tmpl = TextTemplate(\"\"\"\r\n #def echo(greeting, name='world')\r\n ${greeting}, ${name}!\r\n #end\r\n ${echo('Hi', name='you')}\r\n \"\"\")\r\n self.assertEqual(\"\"\"\r\n Hi, you!\r\n\r\n \"\"\", tmpl.generate().render(encoding=None))",
"def quoted_email(context, template_name):\n return quote_text(render_to_string(template_name, context.flatten()))",
"def plugin_admin_html_to_tags(text):\n return OBJ_ADMIN_RE.sub(lambda m: u\"{{ plugin_object %s }}\" % m.groups()[0], text)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
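A minimal usage sketch of templatize, assuming obj is the instance that defines it and that a django.template.Context is passed as the context argument. Because the helper wraps the text in {% autoescape off %}, variable output is not HTML-escaped:

from django.template import Context

rendered = obj.templatize("Hello {{ snippet }}", Context({"snippet": "<b>Ada</b>"}))
# With autoescape off, the markup passes through unescaped:
# rendered == "Hello <b>Ada</b>"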
Decorator to execute a func inside the QThreadPool | def AsQThread(func):
@wraps(func)
def AsyncFunc(*args, **kwargs):
runnable = Runnable(func = func, args = args, kwargs = kwargs)
global pool
pool.start(runnable)
return AsyncFunc | [
"def job(self, func):\n @functools.wraps(func)\n def delay(*args, **kwargs): # pragma: no cover\n return self.enqueue(func, *args, **kwargs)\n func.delay = delay\n return func",
"def call_threaded(func: t.Callable[..., None]) -> t.Callable[..., \"Future\"]:\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return threaded_worker.submit(func, *args, **kwargs)\n except RuntimeError:\n log.debug(f\"Failed to submit function {func}.\")\n\n return wrapper",
"def process(self, func, args=None):\r\n\t\tfunc = self.pool_thread(func)\r\n\t\tt = threading.Thread(target=func, args=args)\r\n\t\tt.start()",
"def wrapper(self, *args, **kw):\n\n def call():\n \"\"\"Calls function on loop thread\"\"\"\n try:\n func(self, *args, **kw)\n except Exception:\n logger.exception(\n \"failed to call async [%r] with [%r] [%r]\", func, args, kw\n )\n\n self.loop.call_soon_threadsafe(call)",
"async def run(self, func: Callable[..., T], /, *args: Any, **kwargs: Any) -> T:\n # TODO: How to make sure that we can get the same event loop at instance creation ?\n _loop = get_event_loop()\n _func = partial(func, *args, **kwargs)\n return await _loop.run_in_executor(self, _func)",
"def nonimmediate(func):\n ioloop = tornado.ioloop.IOLoop.instance()\n @wraps(func)\n def delayed_call(*args, **kwargs):\n callback = partial(func, *args, **kwargs)\n ioloop.add_callback(callback)\n return delayed_call",
"async def call_in_thread(self, func, *args, **kwargs):\n decorator = logcontext.Decorator.get(logcontext.current_context())\n assert decorator is None or callable(decorator)\n\n @functools.wraps(func)\n def wrapper(*args):\n with logcontext.Decorator.use(decorator):\n return func(*args)\n\n return await super().call_in_thread(wrapper, *args, **kwargs)",
"def apply_async(self, func, args=(), kwds={}, callback=None):\n return Pool.apply_async(self, LogExceptions(func), args, kwds, callback)",
"def pool_thread(self, func):\r\n\t\tdef _func(*args, **kws):\r\n\t\t\trtn = func(*args, **kws)\r\n\t\t\tself.cond.acquire()\r\n\t\t\tself._current_thread_num -= 1\r\n\t\t\tself.cond.notify()\r\n\t\t\tif self._current_thread_num == 0:\r\n\t\t\t\tself.pool_active = False\r\n\t\t\tif self.thread_num == 0 and self._current_thread_num == 0 and not self.pool_active:\r\n\t\t\t\tself.joincond.acquire()\r\n\t\t\t\tself.joincond.notify()\r\n\t\t\t\tself.joincond.release()\r\n\t\t\tself.cond.release()\r\n\t\t\treturn rtn\r\n\t\treturn _func",
"def async_func(self, *args, **kwargs):\n del args\n task = TaskRunner(run_function=func, obj=self, kwargs=kwargs)\n ret = task.start()\n return ret",
"def delayable(f):\n def delay(*args, **kwargs):\n queue_key = current_app.config.get('REDIS_QUEUE_KEY', 'default')\n task_id = '%s:result:%s' % (queue_key, str(uuid4()))\n s = dumps((f, task_id, args, kwargs))\n redis.set(task_id, '')\n redis.rpush(queue_key, s)\n return Task(task_id)\n def get_task(task_id):\n result = Task(task_id)\n return result if result.exists else None\n f.delay = delay\n f.get_task = get_task\n return f",
"def run_in_thread(func):\n\n def wrapper(*args, **kwargs):\n thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n thread.start()\n return thread\n\n return wrapper",
"def task(func):\n def task_wrapper(*args, **kwargs):\n return spawn(func, *args, **kwargs)\n return task_wrapper",
"def remote_func(self, **kwargs):\n def wrapper_param(f):\n @functools.wraps(f)\n def wrapper(*f_args, **f_kwargs):\n return self.execute_python(f, f_args, f_kwargs, **kwargs)\n return wrapper\n return wrapper_param",
"def threaded(fn):\n def wrapper(*args, **kwargs):\n Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper",
"def add_task(func):\n # Add task function to be run in the task manager's thread pool\n _TASK_MANAGER.add_task(func)",
"def threaded(f):\n\n def wrapper(*args, **kwargs):\n return _thread.start_new(f, args, kwargs)\n\n return wrapper",
"def submit(self, fn, *args, **kwargs):\n\n return self._threadpool.submit(fn, *args, **kwargs)",
"def submit(\n self,\n fn: Callable[..., Any],\n *args: Any,\n **kwargs: Any,\n ) -> RuntimeFuture:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
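The decorator above hands the work off to a Runnable helper and a module-level pool that are not shown in the row. A plausible sketch of those pieces using PyQt5's QRunnable and QThreadPool; only the QRunnable/QThreadPool APIs are standard, the Runnable class itself is an assumption:

from PyQt5.QtCore import QRunnable, QThreadPool

pool = QThreadPool.globalInstance()

class Runnable(QRunnable):
    """Wraps a plain callable so QThreadPool can execute it on a worker thread."""

    def __init__(self, func, args, kwargs):
        super().__init__()
        self.func, self.args, self.kwargs = func, args, kwargs

    def run(self):
        # Executed on a worker thread taken from the pool.
        self.func(*self.args, **self.kwargs)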
Create a new action for a rule associated with a specific resource name. | def create_action_for_rule(self, ruleresourcename: str, action_post: ActionPOST, query_params: Dict[str, object] = None) -> Action:
if query_params is None:
query_params = {}
path_params = {
"ruleresourcename": ruleresourcename,
}
path = Template("/catalog/v2alpha2/rules/${ruleresourcename}/actions").substitute(path_params)
url = self.base_client.build_url(path)
data = action_post.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, Action) | [
"def create_action_for_rule_by_id(self, ruleid: str, action_post: ActionPOST, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleid\": ruleid,\n }\n\n path = Template(\"/catalog/v2alpha2/rules/${ruleid}/actions\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = action_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Action)",
"def create_rule(connection, rule_info):\n connection.command_path = 'rule'\n extra_headers = {\n connection.header_key: connection.token,\n 'Content-Type': 'text/xml'\n }\n url = connection.build_url()\n rule_data = _build_rule_payload(rule_info)\n verify_ssl = connection.verify_ssl\n res = requests.post(url, headers=extra_headers,\n data=rule_data,\n verify=verify_ssl)\n if res.status_code == 201:\n return rules.parse_rule(res.content)\n\n if res.status_code == 403 and \"Rule already exists\" in res.text:\n raise RuleCreationDuplicateRule(\"Rule already exists\")\n\n raise RuleCreationException(\"Error creating rule: {0} => {0}\".format(\n res.status_code, res.content\n ))",
"def create_rule(self, rule_post: RulePOST, query_params: Dict[str, object] = None) -> Rule:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/catalog/v2alpha2/rules\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = rule_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Rule)",
"def create_action(self, action_name, action_config):\n action_type = action_config['type']\n clz = Actions.get_action_class(action_type)\n action = clz()\n action.set_info(self.name, action_name, self.config)\n return action",
"def add_rule(self, action_type, role, resource):\n\n if not self.validate(role, resource):\n return\n permission = (role, resource)\n if permission not in self.ACTION_DICT.get(action_type):\n self.ACTION_DICT.get(action_type).append(permission)",
"def create_permission(self, action_name: str, resource_name: str) -> Permission | None:\n raise NotImplementedError",
"def new_action(self, name):\n if not self.sub_actor:\n self.sub_actor = Actor(self)\n return self.sub_actor.new_action(name)",
"def action(self, name):\n if not isinstance(name, str):\n raise TypeError('`name` must be a string.')\n\n if not name:\n raise ValueError('`name` must not be blank.')\n\n return self._get_action_class()(\n self._client,\n f'{self._path}/{name}',\n )",
"def rule_create(request, **kwargs):\n body = {'optimizer_rule': kwargs}\n rule = neutronclient(request).create_optimizer_rule(\n body).get('optimizer_rule')\n return Rule(rule)",
"def create(session: Session, rule_name: str, rule_type: str, data: str, frequency: int) -> Rule:\n if not rule_name or not rule_type or not data:\n raise ValueError(\"A rule name, a type, an argument and a frequency is required.\")\n try:\n rule = Rule(rule_name, rule_type, data, frequency)\n session.add(rule)\n session.commit()\n return rule\n except IntegrityError as ex:\n raise RuleExistsError(\"The rule already exists\") from ex",
"def get_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleresourcename\": ruleresourcename,\n \"actionid\": actionid,\n }\n\n path = Template(\"/catalog/v2alpha2/rules/${ruleresourcename}/actions/${actionid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Action)",
"def create_rule_name(name, rule):\n rule_name = '{name}-from-{rule[sources]}-to-{rule[allowed]}'.format(\n name=name,\n rule=rule)\n return utils.get_gcp_resource_name(rule_name)",
"def create(self, validated_data):\n\t\tviolation_data = Violation.objects.get(id=validated_data.pop('vio_id')) ## -- Get Violation Details using Vioaltion ID -- ##\n\t\treturn Action.objects.create(violation=violation_data, **validated_data)",
"def _add_action_rule(self,\n action_rule_stable: list,\n action_rule_flexible: list,\n action_rule_decision: list,\n action_rule_supp: list,\n action_rule_conf: list,\n uplift: float):\n action_rule = [action_rule_stable, action_rule_flexible, action_rule_decision]\n self.action_rules.append([action_rule, action_rule_supp, action_rule_conf, uplift])",
"def create_resource(self, *args, **kwargs):\n target_uri = self._build_uri(*args, **kwargs)\n\n message, status_code = self.request(\n target_uri, POST, request_object=kwargs.get('payload'))\n\n if args:\n resource_type = args[2]\n elif not args and kwargs:\n resource_type = kwargs.get('resource_level')\n else:\n resource_type = None\n\n operation = 'Create {resource_type} resource'.format(\n resource_type=resource_type)\n\n self.check_status_code_success(\n operation, status_code, message)\n return message",
"def CreateAlertRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateAlertRule\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateAlertRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def _create_db_rule_with_name(self, name, stage=False):\n self.rule_table._table.put_item(Item={'RuleName': name, 'Staged': stage})",
"def create_resource(self, name):\n raise NotImplementedError",
"def create_crush_rule(event) -> None:\n\n rule_name = event.params.get('name')\n failure_domain = event.params.get('failure-domain')\n device_class = event.params.get('device-class')\n\n cmd = [\n 'ceph', 'osd', 'crush', 'rule',\n 'create-replicated',\n rule_name,\n 'default',\n failure_domain\n ]\n if device_class:\n cmd.append(device_class)\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n logger.warn(e)\n event.fail(\"rule creation failed due to exception\")\n return\n\n event.set_results({'message': 'success'})"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
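A hedged usage sketch of the rule-action endpoint above; the catalog client instance, the rule resource name, and the prebuilt ActionPOST object are assumptions for illustration, not the service's documented schema:

# Assumes `catalog` is an instance of the client class shown above and
# `action_post` is an ActionPOST built from fields your rule supports.
action = catalog.create_action_for_rule(
    ruleresourcename="mymodule.myrule",   # hypothetical resource name
    action_post=action_post,
    query_params={},                      # optional; defaults to {} inside the method
)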
Create a new action for a specific rule. | def create_action_for_rule_by_id(self, ruleid: str, action_post: ActionPOST, query_params: Dict[str, object] = None) -> Action:
if query_params is None:
query_params = {}
path_params = {
"ruleid": ruleid,
}
path = Template("/catalog/v2alpha2/rules/${ruleid}/actions").substitute(path_params)
url = self.base_client.build_url(path)
data = action_post.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, Action) | [
"def create_action_for_rule(self, ruleresourcename: str, action_post: ActionPOST, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleresourcename\": ruleresourcename,\n }\n\n path = Template(\"/catalog/v2alpha2/rules/${ruleresourcename}/actions\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = action_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Action)",
"def _add_action_rule(self,\n action_rule_stable: list,\n action_rule_flexible: list,\n action_rule_decision: list,\n action_rule_supp: list,\n action_rule_conf: list,\n uplift: float):\n action_rule = [action_rule_stable, action_rule_flexible, action_rule_decision]\n self.action_rules.append([action_rule, action_rule_supp, action_rule_conf, uplift])",
"def create_rule(connection, rule_info):\n connection.command_path = 'rule'\n extra_headers = {\n connection.header_key: connection.token,\n 'Content-Type': 'text/xml'\n }\n url = connection.build_url()\n rule_data = _build_rule_payload(rule_info)\n verify_ssl = connection.verify_ssl\n res = requests.post(url, headers=extra_headers,\n data=rule_data,\n verify=verify_ssl)\n if res.status_code == 201:\n return rules.parse_rule(res.content)\n\n if res.status_code == 403 and \"Rule already exists\" in res.text:\n raise RuleCreationDuplicateRule(\"Rule already exists\")\n\n raise RuleCreationException(\"Error creating rule: {0} => {0}\".format(\n res.status_code, res.content\n ))",
"def create_rule(self, rule_post: RulePOST, query_params: Dict[str, object] = None) -> Rule:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/catalog/v2alpha2/rules\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = rule_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Rule)",
"def add_rule(self, predicate, target, action=None):\n self.rules.append((predicate, target, action))",
"def create_action(self, action_name, action_config):\n action_type = action_config['type']\n clz = Actions.get_action_class(action_type)\n action = clz()\n action.set_info(self.name, action_name, self.config)\n return action",
"def add(self, rule):\r\n self.insertRule(rule, index=None)",
"def rule_create(request, **kwargs):\n body = {'optimizer_rule': kwargs}\n rule = neutronclient(request).create_optimizer_rule(\n body).get('optimizer_rule')\n return Rule(rule)",
"def create(session: Session, rule_name: str, rule_type: str, data: str, frequency: int) -> Rule:\n if not rule_name or not rule_type or not data:\n raise ValueError(\"A rule name, a type, an argument and a frequency is required.\")\n try:\n rule = Rule(rule_name, rule_type, data, frequency)\n session.add(rule)\n session.commit()\n return rule\n except IntegrityError as ex:\n raise RuleExistsError(\"The rule already exists\") from ex",
"def create_action(**args):\n kind = args['kind']\n if kind == NO_ACTION:\n return Action()\n elif kind == SLICE_CHANGED:\n return SliceChangedAction(args['val'])\n\n elif kind == ARCH_CP_CHANGED:\n return ArchCpChangedAction(args['curr'], args['prev'], args['index'])\n elif kind == ARCH_CP_ADDED:\n return ArchCpAddedAction(args['cp'], args['index'])\n elif kind == ARCH_CP_REMOVED:\n return ArchCpRemovedAction(args['index'])\n\n elif kind == LEFT_CANAL_CP_CHANGED:\n return LeftCanalCpChangedAction(args['curr'], args['prev'], args['index'])\n elif kind == RIGHT_CANAL_CP_CHANGED:\n return RightCanalCpChangedAction(args['curr'], args['prev'], args['index'])\n elif kind == LEFT_CANAL_CP_ADDED:\n return LeftCanalCpAddedAction(args['cp'], args['index'])\n elif kind == RIGHT_CANAL_CP_ADDED:\n return RightCanalCpAddedAction(args['cp'], args['index'])\n elif kind == LEFT_CANAL_CP_REMOVED:\n return LeftCanalCpRemovedAction(args['index'])\n elif kind == RIGHT_CANAL_CP_REMOVED:\n return RightCanalCpRemovedAction(args['index'])\n\n elif kind == SIDE_VOLUME_CP_ADDED:\n return SideVolumeCpAddedAction(args['cp'], args['index'], args['pos'])\n elif kind == SIDE_VOLUME_CP_REMOVED:\n return SideVolumeCpRemovedAction(args['index'], args['pos'])\n elif kind == SIDE_VOLUME_CP_CHANGED:\n return SideVolumeCpChangedAction(args['curr'], args['prev'], args['index'], args['pos'])\n elif kind == SIDE_VOLUME_SPLINE_EXTRACTED:\n return SideVolumeSplineExtractedAction(args['pos'], args['from_pos'])\n elif kind == SIDE_VOLUME_SPLINE_RESET:\n return SideVolumeSplineResetAction(args['pos'])\n\n elif kind == TILTED_PLANES_ANNOTATION:\n return TiltedPlanesAnnotationAction()\n elif kind == DEFAULT_PLANES_ANNOTATION:\n return DefaultPlanesAnnotationAction()\n\n else:\n raise ValueError(\"kind not recognized\")",
"def add(self, condition, action, last=False):\n self._rules.append(Rule(condition, action, last))",
"def new_action(self, name):\n if not self.sub_actor:\n self.sub_actor = Actor(self)\n return self.sub_actor.new_action(name)",
"def visit_create_rule(element, compiler, **kw):\n rule = element.rule\n opt_or_replace = \"OR REPLACE\" if element.or_replace else None\n where_clause = (\"WHERE \" + rule.condition if rule.condition is not None\n else None)\n opt_instead = \"INSTEAD\" if rule.do_instead else None\n compiled_commands = tuple(map(partial(compile_if_clause, compiler),\n rule.commands))\n if len(compiled_commands) == 1:\n commands = compiled_commands[0]\n else:\n commands = \"({})\".format(\"; \".join(compiled_commands))\n rule_name = compiler.preparer.quote(rule.name)\n table_name = compiler.preparer.format_table(rule.table)\n return _join_tokens(\n \"CREATE\", opt_or_replace, \"RULE\", rule_name, \"AS ON\", rule.event, \"TO\",\n table_name, where_clause, \"DO\", opt_instead, commands)",
"def build(cls, rule):\n\n if isinstance(rule, cls):\n return rule\n\n rule = parse(rule)\n assert isinstance(rule, dict), f'Not a valid rule: {rule}'\n type = get_event_class_by_type(rule.pop('type') if 'type' in rule else 'Event')\n\n args = {}\n for key, value in rule.items():\n args[key] = value\n\n return cls(type=type, **args)",
"def add_rule(self, action_type, role, resource):\n\n if not self.validate(role, resource):\n return\n permission = (role, resource)\n if permission not in self.ACTION_DICT.get(action_type):\n self.ACTION_DICT.get(action_type).append(permission)",
"def create_action(actor, action, verb, target=None):\n action = Action.objects.new(actor=actor, action_object=action, verb=verb, target=target)\n action.save()\n\n if app_settings.NOTIFY_ON_ACTION:\n send_notification_from_action(action, receivers)",
"def parse(cls, expr: str) -> \"Action\":\n return _parse_and_convert(expr, rule_name=\"onlyAction\")",
"def add_action(item, action):\n return item.copy().addParseAction(action)",
"def get_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleresourcename\": ruleresourcename,\n \"actionid\": actionid,\n }\n\n path = Template(\"/catalog/v2alpha2/rules/${ruleresourcename}/actions/${actionid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Action)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new annotation for a specific dashboard. | def create_annotation_for_dashboardby_id(self, dashboardid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:
if query_params is None:
query_params = {}
path_params = {
"dashboardid": dashboardid,
}
path = Template("/catalog/v2alpha2/dashboards/${dashboardid}/annotations").substitute(path_params)
url = self.base_client.build_url(path)
data = annotation_post.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, Annotation) | [
"def create_annotations(self) -> None:\n pass",
"def create_annotation_for_dataset_by_id(self, datasetid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetid}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)",
"def post_annotations(self):\n annotations_url = self.url + \"/annotations\"\n requests.post(annotations_url, json=self.annotations, auth=self.auth)",
"def setAnnotation(*args, **kwargs):\n \n pass",
"def import_dashboard(self, dashboard={'dashboard': {...}, 'folderId': 0, 'overwrite': True}):\n # make id value as None before creating the dashboard\n dashboard[\"dashboard\"][\"id\"] = None\n try:\n imported_dashboard = self.create_or_update(dashboard=dashboard)\n except Exception as e:\n print(\"Got error: %s\" % e)\n \n return imported_dashboard",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def create_annotation(\n self,\n parent,\n annotation,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"create_annotation\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_annotation\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_annotation,\n default_retry=self._method_configs[\"CreateAnnotation\"].retry,\n default_timeout=self._method_configs[\"CreateAnnotation\"].timeout,\n client_info=self._client_info,\n )\n\n request = incidents_service_pb2.CreateAnnotationRequest(\n parent=parent, annotation=annotation\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"parent\", parent)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"create_annotation\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )",
"def setannotation(self, *args):\n\n typ, dat = self._simple_command('SETANNOTATION', *args)\n return self._untagged_response(typ, dat, 'ANNOTATION')",
"def make_annotations(dir_tiles, config, zoom=\"18\"):\n print(\"MAKE ANNOTATIONS\")\n # Read groups file\n with open(config) as json_file:\n config = json.load(json_file)\n\n # Get sub-folder names\n dir_imgtiles, dir_labeltiles = tiles.get_tiles_directories(dir_tiles)\n\n dir_imgtiles_zoom = dir_imgtiles / zoom\n dir_labeltiles_zoom = dir_labeltiles / zoom\n\n # Create the annotation JSON file\n is_crowd = False\n annotations_json = annotations.write_complete_annotations(\n dir_imgtiles_zoom, dir_labeltiles_zoom, config, is_crowd, zoom\n )\n\n print(f\"The file {annotations_json} contains your annotations.\")",
"def _create_annotations(self, args: parser_extensions.Namespace):\n annotations = flags.Get(args, 'annotations')\n return self._dict_to_annotations_message(annotations)",
"def add_annotation(self, dg_ann, **more_attrs):\n # First, get the embedding element for all annotations.\n anns_el = self.find_or_create_annotations()\n # Second, create an appropriate element for the new annotation.\n if dg_ann.user is not None:\n username = dg_ann.user.username\n else:\n username = ''\n ann_el = etree.SubElement(\n anns_el,\n self.ANNOTATION_ELEM,\n id=str(dg_ann.pk),\n program_version=dg_ann.program_version,\n date_saved=self.format_datetime(dg_ann.date_saved),\n user=username)\n for name, val in more_attrs.iteritems():\n ann_el.set(name, val)\n if 'offensive' in settings.EXTRA_QUESTIONS:\n ann_el.set('offensive', str(dg_ann.offensive))\n if 'accent' in settings.EXTRA_QUESTIONS:\n ann_el.set('accent', dg_ann.accent or \"native\")\n if 'quality' in settings.EXTRA_QUESTIONS:\n ann_el.set('quality',\n ('clear' if dg_ann.quality == 1 else 'noisy'))\n if dg_ann.notes:\n ann_el.text = dg_ann.notes",
"def dashboard_import(\n self,\n resource_group_name, # type: str\n dashboard_name, # type: str\n dashboard, # type: \"models.Dashboard\"\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.Dashboard\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.Dashboard\"]\n error_map = kwargs.pop('error_map', {})\n api_version = \"2019-01-01-preview\"\n\n # Construct URL\n url = self.create_or_update.metadata['url']\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'dashboardName': self._serialize.url(\"dashboard_name\", dashboard_name, 'str', max_length=64, min_length=3),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\n \"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Accept'] = 'application/json'\n header_parameters['Content-Type'] = 'application/json'\n\n # Construct body\n body_content = self._serialize.body(dashboard, 'Dashboard')\n\n # Construct and send request\n request = self._client.put(\n url, query_parameters, header_parameters, body_content)\n pipeline_response = self._client._pipeline.run(\n request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code,\n response=response, error_map=error_map)\n raise models.ErrorResponseException.from_response(\n response, self._deserialize)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('Dashboard', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('Dashboard', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized",
"def _write_annotation(filename, annotation):\n _mkdir(os.path.dirname(filename))\n save_pbobject_as_json(annotation, filename)",
"def create_annotation_for_dataset_by_resource_name(self, datasetresourcename: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetresourcename}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)",
"def _build_annotation(arguments: Dict):\n if arguments[\"annotation_format\"] == \"pascal\":\n\n # write a PASCAL VOC file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_pascal(\n arguments[\"bboxes\"],\n arguments[\"class_label\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n # arguments[\"include_segmentation_masks\"],\n )\n\n elif arguments[\"annotation_format\"] == \"darknet\":\n\n # write a Darknet annotation file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_darknet(\n arguments[\"bboxes\"],\n arguments[\"class_index\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n )\n # elif arguments[\"annotation_format\"] == \"kitti\":\n # # TODO\n # pass\n else:\n raise ValueError(\n f\"Unsupported annotation format: \\\"{arguments['annotation_format']}\\\"\",\n )",
"def create_dashboard(self, dashboard_post: DashboardPOST, query_params: Dict[str, object] = None) -> Dashboard:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/catalog/v2alpha2/dashboards\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = dashboard_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Dashboard)",
"def create_genome_annotation(self, filename):\n ann = Data.objects.create(\n name='Annotation_{}'.format(filename.split('.')[0]),\n process=get_process('upload-gtf'),\n contributor=get_superuser(),\n status=Data.STATUS_PROCESSING,\n input={'src': {'file': filename}, 'source': 'UCSC'})\n\n os.mkdir(os.path.join(self.data_dir, str(ann.id)))\n\n with gzip.open(os.path.join(self.test_files_path, filename), 'rb') as gzfile:\n with open(os.path.join(self.data_dir, str(ann.id), filename[:-3]), 'wb') as outfile:\n shutil.copyfileobj(gzfile, outfile)\n\n ann.output = {\n 'gtf': {'file': filename[:-3]},\n 'source': 'UCSC'\n }\n ann.status = Data.STATUS_DONE\n ann.save()\n\n with open(os.path.join(self.data_dir, str(ann.id), 'stdout.txt'), 'w') as stdout:\n stdout.write('Upload genome annotation with the '\n 'generate_diffexpr_cuffdiff django-admin command.')\n\n logger.info(__('Genome annotation created: {} (id={})', filename, ann.id))\n\n return ann",
"def _mkannotation(\n pa: typ.Dict[str, typ.Any],\n page: Page\n) -> typ.Optional[Annotation]:\n\n subtype = pa.get('Subtype')\n annot_type = None\n assert isinstance(subtype, PSLiteral)\n try:\n annot_type = ANNOT_SUBTYPES[subtype]\n except KeyError:\n pass\n\n if annot_type is None:\n if subtype not in IGNORED_ANNOT_SUBTYPES:\n logger.warning(\"Unsupported %s annotation ignored on %s\", subtype.name, page)\n return None\n\n contents = pa.get('Contents')\n if contents is not None:\n # decode as string, normalise line endings, replace special characters\n contents = cleanup_text(pdfminer.utils.decode_text(contents))\n\n rgb: typ.Optional[RGB] = None\n color = pa.get('C')\n if color is not None:\n if (isinstance(color, list)\n and len(color) == 3\n and all(isinstance(e, (int, float)) and 0 <= e <= 1 for e in color)):\n rgb = RGB(*color)\n else:\n logger.warning(\"Invalid color %s in annotation on %s\", color, page)\n\n # Rect defines the location of the annotation on the page\n rect = pdftypes.resolve1(pa.get('Rect'))\n\n # QuadPoints are defined only for \"markup\" annotations (Highlight, Underline, StrikeOut,\n # Squiggly), where they specify the quadrilaterals (boxes) covered by the annotation.\n quadpoints = pdftypes.resolve1(pa.get('QuadPoints'))\n\n author = pdftypes.resolve1(pa.get('T'))\n if author is not None:\n author = pdfminer.utils.decode_text(author)\n\n created = None\n dobj = pa.get('CreationDate')\n # some pdf apps set modification date, but not creation date\n dobj = dobj or pa.get('ModDate')\n # poppler-based apps (e.g. Okular) use 'M' for some reason\n dobj = dobj or pa.get('M')\n createds = pdftypes.resolve1(dobj)\n if createds is not None:\n createds = pdfminer.utils.decode_text(createds)\n created = decode_datetime(createds)\n\n return Annotation(page, annot_type, quadpoints, rect,\n contents, author=author, created=created, color=rgb)",
"def delete_annotation_of_dashboard_by_id(self, dashboardid: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"dashboardid\": dashboardid,\n \"annotationid\": annotationid,\n }\n\n path = Template(\"/catalog/v2alpha2/dashboards/${dashboardid}/annotations/${annotationid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.delete(url, params=query_params)\n return handle_response(response, )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new annotation for a specific dataset. | def create_annotation_for_dataset_by_id(self, datasetid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:
        if query_params is None:
            query_params = {}
        path_params = {
            "datasetid": datasetid,
        }
        path = Template("/catalog/v2alpha2/datasets/${datasetid}/annotations").substitute(path_params)
        url = self.base_client.build_url(path)
        data = annotation_post.to_dict()
        response = self.base_client.post(url, json=data, params=query_params)
        return handle_response(response, Annotation) | [
"def create_annotation_for_dataset_by_resource_name(self, datasetresourcename: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetresourcename}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)",
"def create_annotations(self) -> None:\n pass",
"def setAnnotation(*args, **kwargs):\n \n pass",
"def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)",
"def setannotation(self, *args):\n\n typ, dat = self._simple_command('SETANNOTATION', *args)\n return self._untagged_response(typ, dat, 'ANNOTATION')",
"def create_annotation_for_dashboardby_id(self, dashboardid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"dashboardid\": dashboardid,\n }\n\n path = Template(\"/catalog/v2alpha2/dashboards/${dashboardid}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)",
"def create_genome_annotation(self, filename):\n ann = Data.objects.create(\n name='Annotation_{}'.format(filename.split('.')[0]),\n process=get_process('upload-gtf'),\n contributor=get_superuser(),\n status=Data.STATUS_PROCESSING,\n input={'src': {'file': filename}, 'source': 'UCSC'})\n\n os.mkdir(os.path.join(self.data_dir, str(ann.id)))\n\n with gzip.open(os.path.join(self.test_files_path, filename), 'rb') as gzfile:\n with open(os.path.join(self.data_dir, str(ann.id), filename[:-3]), 'wb') as outfile:\n shutil.copyfileobj(gzfile, outfile)\n\n ann.output = {\n 'gtf': {'file': filename[:-3]},\n 'source': 'UCSC'\n }\n ann.status = Data.STATUS_DONE\n ann.save()\n\n with open(os.path.join(self.data_dir, str(ann.id), 'stdout.txt'), 'w') as stdout:\n stdout.write('Upload genome annotation with the '\n 'generate_diffexpr_cuffdiff django-admin command.')\n\n logger.info(__('Genome annotation created: {} (id={})', filename, ann.id))\n\n return ann",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def _build_annotation(arguments: Dict):\n if arguments[\"annotation_format\"] == \"pascal\":\n\n # write a PASCAL VOC file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_pascal(\n arguments[\"bboxes\"],\n arguments[\"class_label\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n # arguments[\"include_segmentation_masks\"],\n )\n\n elif arguments[\"annotation_format\"] == \"darknet\":\n\n # write a Darknet annotation file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_darknet(\n arguments[\"bboxes\"],\n arguments[\"class_index\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n )\n # elif arguments[\"annotation_format\"] == \"kitti\":\n # # TODO\n # pass\n else:\n raise ValueError(\n f\"Unsupported annotation format: \\\"{arguments['annotation_format']}\\\"\",\n )",
"def register_dataset_from_file(imagedir,anndir,textfile,shuffle, dsetname, CLASS_NAMES,rename_classes, omit_classes):\n ##Pop the dataset name from the _REGISTERED dict if it already exists\n if dsetname in DatasetCatalog.list():\n DatasetCatalog.remove(dsetname)\n\n DatasetCatalog.register(dsetname, lambda imagedir=imagedir : create_detectron2_dicts(imagedir,anndir,textfile, CLASS_NAMES, rename_classes, omit_classes, shuffle=False))\n MetadataCatalog.get(dsetname).set(thing_classes=CLASS_NAMES)",
"def __prepare_dataset(dataset, stride, prepared_annotations_name=\"prepared_train_annotations.pkl\",\n images_folder_default_name=\"train2017\",\n annotations_filename=\"person_keypoints_train2017.json\",\n verbose=True):\n if isinstance(dataset, ExternalDataset):\n if dataset.dataset_type.lower() != \"coco\":\n raise UserWarning(\"dataset_type must be \\\"COCO\\\"\")\n\n # Get files and subdirectories of dataset.path directory\n f = []\n dirs = []\n for (dirpath, dirnames, filenames) in os.walk(dataset.path):\n f = filenames\n dirs = dirnames\n break\n\n # Get images folder\n if images_folder_default_name not in dirs:\n raise UserWarning(\"Didn't find \\\"\" + images_folder_default_name +\n \"\\\" folder in the dataset path provided.\")\n images_folder = os.path.join(dataset.path, images_folder_default_name)\n\n # Get annotations file\n if annotations_filename not in f:\n raise UserWarning(\"Didn't find \\\"\" + annotations_filename +\n \"\\\" file in the dataset path provided.\")\n annotations_file = os.path.join(dataset.path, annotations_filename)\n\n # Convert annotations to internal format if needed\n if prepared_annotations_name not in f:\n if verbose:\n print(\"Didn't find \" + prepared_annotations_name + \" in dataset.path, creating new...\")\n prepare_train_labels.convert_annotations(annotations_file,\n output_path=os.path.join(dataset.path,\n prepared_annotations_name))\n if verbose:\n print(\"Created new .pkl file containing prepared annotations in internal format.\")\n prepared_train_labels = os.path.join(dataset.path, prepared_annotations_name)\n\n sigma = 7\n paf_thickness = 1\n return CocoTrainDataset(prepared_train_labels, images_folder,\n stride, sigma, paf_thickness,\n transform=transforms.Compose([\n ConvertKeypoints(),\n Scale(),\n Rotate(pad=(128, 128, 128)),\n CropPad(pad=(128, 128, 128)),\n Flip()]))\n elif isinstance(dataset, DatasetIterator):\n return dataset",
"def _create_annotations(self, args: parser_extensions.Namespace):\n annotations = flags.Get(args, 'annotations')\n return self._dict_to_annotations_message(annotations)",
"def prepare_dataset(\n mc: Mashcima,\n annotations: List[str],\n min_staff_with: int,\n single_staff=False\n):\n def _image_generator(annotation_index: int, _: List[str]) -> np.ndarray:\n return _complex_image_generator(\n mc, annotation_index, annotations, single_staff, min_staff_with\n )\n dataset = AnnotationsDataset(annotations, _image_generator)\n dataset = ParallelFeedingDataset(dataset) # make batch preparation parallel\n return dataset",
"def post_annotations(self):\n annotations_url = self.url + \"/annotations\"\n requests.post(annotations_url, json=self.annotations, auth=self.auth)",
"def add_annotation(self, dg_ann, **more_attrs):\n # First, get the embedding element for all annotations.\n anns_el = self.find_or_create_annotations()\n # Second, create an appropriate element for the new annotation.\n if dg_ann.user is not None:\n username = dg_ann.user.username\n else:\n username = ''\n ann_el = etree.SubElement(\n anns_el,\n self.ANNOTATION_ELEM,\n id=str(dg_ann.pk),\n program_version=dg_ann.program_version,\n date_saved=self.format_datetime(dg_ann.date_saved),\n user=username)\n for name, val in more_attrs.iteritems():\n ann_el.set(name, val)\n if 'offensive' in settings.EXTRA_QUESTIONS:\n ann_el.set('offensive', str(dg_ann.offensive))\n if 'accent' in settings.EXTRA_QUESTIONS:\n ann_el.set('accent', dg_ann.accent or \"native\")\n if 'quality' in settings.EXTRA_QUESTIONS:\n ann_el.set('quality',\n ('clear' if dg_ann.quality == 1 else 'noisy'))\n if dg_ann.notes:\n ann_el.text = dg_ann.notes",
"def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)",
"def from_data(cls, data, attribute, xykey=None, **kwargs):\n annotator = cls(attribute, **kwargs)\n annotator.train(WeightedGraph(data, attribute, xykey=xykey))\n return annotator",
"def set_annotation(self, string):\n self.annotation = string\n return self",
"def create_annotation(\n self,\n parent,\n annotation,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"create_annotation\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_annotation\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_annotation,\n default_retry=self._method_configs[\"CreateAnnotation\"].retry,\n default_timeout=self._method_configs[\"CreateAnnotation\"].timeout,\n client_info=self._client_info,\n )\n\n request = incidents_service_pb2.CreateAnnotationRequest(\n parent=parent, annotation=annotation\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"parent\", parent)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"create_annotation\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new annotation for a specific dataset. | def create_annotation_for_dataset_by_resource_name(self, datasetresourcename: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:
        if query_params is None:
            query_params = {}
        path_params = {
            "datasetresourcename": datasetresourcename,
        }
        path = Template("/catalog/v2alpha2/datasets/${datasetresourcename}/annotations").substitute(path_params)
        url = self.base_client.build_url(path)
        data = annotation_post.to_dict()
        response = self.base_client.post(url, json=data, params=query_params)
        return handle_response(response, Annotation) | [
"def create_annotation_for_dataset_by_id(self, datasetid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetid}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)",
"def create_annotations(self) -> None:\n pass",
"def setAnnotation(*args, **kwargs):\n \n pass",
"def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)",
"def setannotation(self, *args):\n\n typ, dat = self._simple_command('SETANNOTATION', *args)\n return self._untagged_response(typ, dat, 'ANNOTATION')",
"def create_annotation_for_dashboardby_id(self, dashboardid: str, annotation_post: AnnotationPOST, query_params: Dict[str, object] = None) -> Annotation:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"dashboardid\": dashboardid,\n }\n\n path = Template(\"/catalog/v2alpha2/dashboards/${dashboardid}/annotations\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = annotation_post.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Annotation)",
"def create_genome_annotation(self, filename):\n ann = Data.objects.create(\n name='Annotation_{}'.format(filename.split('.')[0]),\n process=get_process('upload-gtf'),\n contributor=get_superuser(),\n status=Data.STATUS_PROCESSING,\n input={'src': {'file': filename}, 'source': 'UCSC'})\n\n os.mkdir(os.path.join(self.data_dir, str(ann.id)))\n\n with gzip.open(os.path.join(self.test_files_path, filename), 'rb') as gzfile:\n with open(os.path.join(self.data_dir, str(ann.id), filename[:-3]), 'wb') as outfile:\n shutil.copyfileobj(gzfile, outfile)\n\n ann.output = {\n 'gtf': {'file': filename[:-3]},\n 'source': 'UCSC'\n }\n ann.status = Data.STATUS_DONE\n ann.save()\n\n with open(os.path.join(self.data_dir, str(ann.id), 'stdout.txt'), 'w') as stdout:\n stdout.write('Upload genome annotation with the '\n 'generate_diffexpr_cuffdiff django-admin command.')\n\n logger.info(__('Genome annotation created: {} (id={})', filename, ann.id))\n\n return ann",
"def test_create_annotations(self):\n segmentation = adapter.SFFSegmentation() # annotation\n segmentation.name = u\"name\"\n segmentation.software_list = adapter.SFFSoftwareList()\n segmentation.software_list.append(\n adapter.SFFSoftware(\n name=u\"Software\",\n version=u\"1.0.9\",\n processing_details=u\"Processing details\"\n )\n )\n segmentation.details = u\"Details\"\n # global external references\n segmentation.global_external_references = adapter.SFFGlobalExternalReferenceList()\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'one',\n url=u'two',\n accession=u'three'\n )\n )\n segmentation.global_external_references.append(\n adapter.SFFExternalReference(\n resource=u'four',\n url=u'five',\n accession=u'six'\n )\n )\n segmentation.segments = adapter.SFFSegmentList()\n segment = adapter.SFFSegment()\n biol_ann = adapter.SFFBiologicalAnnotation()\n biol_ann.name = u\"Segment1\"\n biol_ann.description = u\"Some description\"\n # external refs\n biol_ann.external_references = adapter.SFFExternalReferenceList()\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sldjflj\",\n accession=u\"doieaik\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"sljd;f\",\n accession=u\"20ijalf\"\n )\n )\n biol_ann.external_references.append(\n adapter.SFFExternalReference(\n resource=u\"lsdjlsd\",\n url=u\"lsjfd;sd\",\n accession=u\"23ijlsdjf\"\n )\n )\n biol_ann.number_of_instances = 30\n segment.biological_annotation = biol_ann\n # colour\n segment.colour = adapter.SFFRGBA(\n red=1,\n green=0,\n blue=1,\n alpha=0\n )\n segmentation.segments.append(segment)\n # export\n # segmentation.export(os.path.join(TEST_DATA_PATH, u'sff', u'v0.7', u'test_annotated_segmentation.sff'))\n # assertions\n self.assertEqual(segmentation.name, u'name')\n self.assertEqual(segmentation.version, segmentation._local.schema_version) # automatically set\n software = segmentation.software_list[0]\n self.assertEqual(software.name, u\"Software\")\n self.assertEqual(software.version, u\"1.0.9\")\n self.assertEqual(software.processing_details, u\"Processing details\")\n self.assertEqual(segmentation.details, u\"Details\")\n # global external references\n self.assertEqual(segmentation.global_external_references[0].resource, u'one')\n self.assertEqual(segmentation.global_external_references[0].url, u'two')\n self.assertEqual(segmentation.global_external_references[0].accession, u'three')\n self.assertEqual(segmentation.global_external_references[1].resource, u'four')\n self.assertEqual(segmentation.global_external_references[1].url, u'five')\n self.assertEqual(segmentation.global_external_references[1].accession, u'six')\n # segment: biological_annotation\n self.assertEqual(segment.biological_annotation.name, u\"Segment1\")\n self.assertEqual(segment.biological_annotation.description, u\"Some description\")\n self.assertEqual(len(segment.biological_annotation.external_references), 3)\n self.assertEqual(segment.biological_annotation.external_references[0].resource, u\"sldjflj\")\n self.assertEqual(segment.biological_annotation.external_references[0].accession, u\"doieaik\")\n self.assertEqual(segment.biological_annotation.external_references[1].resource, u\"sljd;f\")\n self.assertEqual(segment.biological_annotation.external_references[1].accession, u\"20ijalf\")\n self.assertEqual(segment.biological_annotation.external_references[2].resource, u\"lsdjlsd\")\n 
self.assertEqual(segment.biological_annotation.external_references[2].url, u\"lsjfd;sd\")\n self.assertEqual(segment.biological_annotation.external_references[2].accession, u\"23ijlsdjf\")\n self.assertEqual(segment.biological_annotation.number_of_instances, 30)\n # colour\n self.assertEqual(segment.colour.value, (1, 0, 1, 0))",
"def _build_annotation(arguments: Dict):\n if arguments[\"annotation_format\"] == \"pascal\":\n\n # write a PASCAL VOC file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_pascal(\n arguments[\"bboxes\"],\n arguments[\"class_label\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n # arguments[\"include_segmentation_masks\"],\n )\n\n elif arguments[\"annotation_format\"] == \"darknet\":\n\n # write a Darknet annotation file for this image\n # using all bounding boxes in the image's group\n _write_bboxes_as_darknet(\n arguments[\"bboxes\"],\n arguments[\"class_index\"],\n arguments[\"image_id\"],\n arguments[\"images_dir\"],\n arguments[\"annotations_dir\"],\n )\n # elif arguments[\"annotation_format\"] == \"kitti\":\n # # TODO\n # pass\n else:\n raise ValueError(\n f\"Unsupported annotation format: \\\"{arguments['annotation_format']}\\\"\",\n )",
"def register_dataset_from_file(imagedir,anndir,textfile,shuffle, dsetname, CLASS_NAMES,rename_classes, omit_classes):\n ##Pop the dataset name from the _REGISTERED dict if it already exists\n if dsetname in DatasetCatalog.list():\n DatasetCatalog.remove(dsetname)\n\n DatasetCatalog.register(dsetname, lambda imagedir=imagedir : create_detectron2_dicts(imagedir,anndir,textfile, CLASS_NAMES, rename_classes, omit_classes, shuffle=False))\n MetadataCatalog.get(dsetname).set(thing_classes=CLASS_NAMES)",
"def __prepare_dataset(dataset, stride, prepared_annotations_name=\"prepared_train_annotations.pkl\",\n images_folder_default_name=\"train2017\",\n annotations_filename=\"person_keypoints_train2017.json\",\n verbose=True):\n if isinstance(dataset, ExternalDataset):\n if dataset.dataset_type.lower() != \"coco\":\n raise UserWarning(\"dataset_type must be \\\"COCO\\\"\")\n\n # Get files and subdirectories of dataset.path directory\n f = []\n dirs = []\n for (dirpath, dirnames, filenames) in os.walk(dataset.path):\n f = filenames\n dirs = dirnames\n break\n\n # Get images folder\n if images_folder_default_name not in dirs:\n raise UserWarning(\"Didn't find \\\"\" + images_folder_default_name +\n \"\\\" folder in the dataset path provided.\")\n images_folder = os.path.join(dataset.path, images_folder_default_name)\n\n # Get annotations file\n if annotations_filename not in f:\n raise UserWarning(\"Didn't find \\\"\" + annotations_filename +\n \"\\\" file in the dataset path provided.\")\n annotations_file = os.path.join(dataset.path, annotations_filename)\n\n # Convert annotations to internal format if needed\n if prepared_annotations_name not in f:\n if verbose:\n print(\"Didn't find \" + prepared_annotations_name + \" in dataset.path, creating new...\")\n prepare_train_labels.convert_annotations(annotations_file,\n output_path=os.path.join(dataset.path,\n prepared_annotations_name))\n if verbose:\n print(\"Created new .pkl file containing prepared annotations in internal format.\")\n prepared_train_labels = os.path.join(dataset.path, prepared_annotations_name)\n\n sigma = 7\n paf_thickness = 1\n return CocoTrainDataset(prepared_train_labels, images_folder,\n stride, sigma, paf_thickness,\n transform=transforms.Compose([\n ConvertKeypoints(),\n Scale(),\n Rotate(pad=(128, 128, 128)),\n CropPad(pad=(128, 128, 128)),\n Flip()]))\n elif isinstance(dataset, DatasetIterator):\n return dataset",
"def _create_annotations(self, args: parser_extensions.Namespace):\n annotations = flags.Get(args, 'annotations')\n return self._dict_to_annotations_message(annotations)",
"def prepare_dataset(\n mc: Mashcima,\n annotations: List[str],\n min_staff_with: int,\n single_staff=False\n):\n def _image_generator(annotation_index: int, _: List[str]) -> np.ndarray:\n return _complex_image_generator(\n mc, annotation_index, annotations, single_staff, min_staff_with\n )\n dataset = AnnotationsDataset(annotations, _image_generator)\n dataset = ParallelFeedingDataset(dataset) # make batch preparation parallel\n return dataset",
"def post_annotations(self):\n annotations_url = self.url + \"/annotations\"\n requests.post(annotations_url, json=self.annotations, auth=self.auth)",
"def add_annotation(self, dg_ann, **more_attrs):\n # First, get the embedding element for all annotations.\n anns_el = self.find_or_create_annotations()\n # Second, create an appropriate element for the new annotation.\n if dg_ann.user is not None:\n username = dg_ann.user.username\n else:\n username = ''\n ann_el = etree.SubElement(\n anns_el,\n self.ANNOTATION_ELEM,\n id=str(dg_ann.pk),\n program_version=dg_ann.program_version,\n date_saved=self.format_datetime(dg_ann.date_saved),\n user=username)\n for name, val in more_attrs.iteritems():\n ann_el.set(name, val)\n if 'offensive' in settings.EXTRA_QUESTIONS:\n ann_el.set('offensive', str(dg_ann.offensive))\n if 'accent' in settings.EXTRA_QUESTIONS:\n ann_el.set('accent', dg_ann.accent or \"native\")\n if 'quality' in settings.EXTRA_QUESTIONS:\n ann_el.set('quality',\n ('clear' if dg_ann.quality == 1 else 'noisy'))\n if dg_ann.notes:\n ann_el.text = dg_ann.notes",
"def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)",
"def from_data(cls, data, attribute, xykey=None, **kwargs):\n annotator = cls(attribute, **kwargs)\n annotator.train(WeightedGraph(data, attribute, xykey=xykey))\n return annotator",
"def set_annotation(self, string):\n self.annotation = string\n return self",
"def create_annotation(\n self,\n parent,\n annotation,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n # Wrap the transport method to add retry and timeout logic.\n if \"create_annotation\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_annotation\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_annotation,\n default_retry=self._method_configs[\"CreateAnnotation\"].retry,\n default_timeout=self._method_configs[\"CreateAnnotation\"].timeout,\n client_info=self._client_info,\n )\n\n request = incidents_service_pb2.CreateAnnotationRequest(\n parent=parent, annotation=annotation\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"parent\", parent)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"create_annotation\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |