query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: sequence (lengths 19 to 20)
metadata: dict
Creates a new identity provider in your tenancy. For more information, see `Identity Providers and Federation`__. You must specify your tenancy's OCID as the compartment ID in the request object. Remember that the tenancy is simply the root compartment. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the `IdentityProvider`, which must be unique across all `IdentityProvider` objects in your tenancy and cannot be changed. You must also specify a description for the `IdentityProvider` (although it can be an empty string). It does not have to be unique, and you can change it anytime with
def create_identity_provider(self, create_identity_provider_details, **kwargs):
    resource_path = "/identityProviders"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_identity_provider got unknown kwargs: {!r}".format(extra_kwargs))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_identity_provider_details,
            response_type="IdentityProvider")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_identity_provider_details,
            response_type="IdentityProvider")
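A minimal usage sketch for the operation above, assuming the OCI Python SDK (`oci` package) and a SAML2 provider; the compartment OCID, name, metadata URL, and metadata values are placeholders, not values from this dataset:

import oci

# Load the default config (~/.oci/config); assumes a valid profile exists.
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# CreateSaml2IdentityProviderDetails is one concrete subtype of
# CreateIdentityProviderDetails; all field values below are illustrative.
details = oci.identity.models.CreateSaml2IdentityProviderDetails(
    compartment_id=config["tenancy"],          # must be the tenancy OCID
    name="ExampleIdP",                         # unique in the tenancy, cannot be changed later
    description="Example SAML2 identity provider",
    product_type="IDCS",
    protocol="SAML2",
    metadata_url="https://idp.example.com/metadata",
    metadata="<EntityDescriptor>...</EntityDescriptor>")

response = identity.create_identity_provider(details)
print(response.data.id)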
[ "def create_identity_provider(module, sdk, cloud, name):\n\n if module.check_mode:\n return True, None\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n if enabled is None:\n enabled = True\n if remote_ids is None:\n remote_ids = []\n\n attributes = {\n 'domain_id': domain_id,\n 'enabled': enabled,\n 'remote_ids': remote_ids,\n }\n if description is not None:\n attributes['description'] = description\n\n try:\n idp = cloud.identity.create_identity_provider(id=name, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to create identity provider: {0}'.format(str(ex)))\n return (True, idp)", "def create_cloud_provider(providername):\n backend_name = request.get_json().get(\"backend\")\n service_name = request.get_json().get(\"service\")\n response = jsonify(\n admin.create_provider(\n current_app.scoped_session(),\n providername,\n backend=backend_name,\n service=service_name,\n )\n )\n return response", "def create_conference_provider(ConferenceProviderName=None, ConferenceProviderType=None, IPDialIn=None, PSTNDialIn=None, MeetingSetting=None, ClientRequestToken=None):\n pass", "def create(self, identity, record=None, data=None, **kwargs):\n data['id'] = data['id'].lower()\n self._validate(data['id'])\n record['id'] = data['id']\n try:\n provider = record.__class__.pid.field._provider.create(record=record)\n except PIDAlreadyExists:\n raise ValidationError(\n 'A community with this identifier already exists.',\n field_name='id',\n )\n setattr(record, 'pid', provider.pid)", "def m_create_identity(DID, domain_name, website, commercial_name, parent_node_account, password, overwrite):\n\n error, didDoc = create_identity(\n DID, domain_name, website, commercial_name, parent_node_account, password, overwrite)\n if error is not None:\n print(error)\n\n print(f\"Created\")", "def _create_resource_provider(self, context, uuid, name,\n parent_provider_uuid=None):\n url = \"/resource_providers\"\n payload = {\n 'uuid': uuid,\n 'name': name,\n }\n if parent_provider_uuid is not None:\n payload['parent_provider_uuid'] = parent_provider_uuid\n\n # Bug #1746075: First try the microversion that returns the new\n # provider's payload.\n resp = self.post(url, payload,\n version=POST_RPS_RETURNS_PAYLOAD_API_VERSION,\n global_request_id=context.global_id)\n\n placement_req_id = self.get_placement_request_id(resp)\n\n if resp:\n msg = (\"[%(placement_req_id)s] Created resource provider record \"\n \"via placement API for resource provider with UUID \"\n \"%(uuid)s and name %(name)s.\")\n args = {\n 'uuid': uuid,\n 'name': name,\n 'placement_req_id': placement_req_id,\n }\n LOG.info(msg, args)\n return resp.json()", "def PostIdentityProviders(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def add_identity_provider(self, **identity_provider_args: Any) -> dict:\n try:\n existing = self.db.query(IdentityProviders).filter_by(id_openeo=identity_provider_args['id']).first()\n if existing:\n return ServiceException(service_name, 400, f\"Identity Provider {identity_provider_args['id']}\"\n f\" exists already in the database. 
Could not be\"\n f\" added\").to_dict()\n\n identity_provider = IdentityProviderSchema().load(identity_provider_args)\n self.db.add(identity_provider)\n self.db.commit()\n\n LOGGER.info(f\"Identity provider '{identity_provider.id_openeo}' added to database.\")\n return {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": {'message': f\"Identity provider '{identity_provider.id_openeo}' added to database.\"}\n }\n except Exception as exp:\n return ServiceException(service_name, 500, str(exp)).to_dict()", "def add_new_provider(self, provider_name, provider_type, endpoints, zone_id, provider_region):\n try:\n result = self.client.post(self.providers_url, name=provider_name,\n type=ManageIQProvider.PROVIDER_TYPES[provider_type],\n zone={'id': zone_id},\n connection_configurations=endpoints,\n provider_region=provider_region)\n provider_id = result['results'][0]['id']\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to add provider. Error: {!r}\".format(e))\n return provider_id", "def create_identity_pool(self, identity_pool_name,\n allow_unauthenticated_identities,\n supported_login_providers=None,\n developer_provider_name=None,\n open_id_connect_provider_ar_ns=None):\n params = {\n 'IdentityPoolName': identity_pool_name,\n 'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,\n }\n if supported_login_providers is not None:\n params['SupportedLoginProviders'] = supported_login_providers\n if developer_provider_name is not None:\n params['DeveloperProviderName'] = developer_provider_name\n if open_id_connect_provider_ar_ns is not None:\n params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns\n return self.make_request(action='CreateIdentityPool',\n body=json.dumps(params))", "def __init__(__self__, *,\n identity_pool_id: pulumi.Input[str],\n identity_provider_name: pulumi.Input[str],\n principal_tags: Optional[Any] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"identity_pool_id\", identity_pool_id)\n pulumi.set(__self__, \"identity_provider_name\", identity_provider_name)\n if principal_tags is not None:\n pulumi.set(__self__, \"principal_tags\", principal_tags)\n if use_defaults is not None:\n pulumi.set(__self__, \"use_defaults\", use_defaults)", "def createIdentity(self, identityName, params = None):\n return IdentityCertificate.certificateNameToPublicKeyName(\n self.createIdentityAndCertificate(identityName, params))", "def create(self, identity, data=None, record=None, **kwargs):\n if system_process in identity.provides:\n return\n\n member = {\n \"type\": \"user\",\n \"id\": str(identity.id),\n }\n self.service.members.add(\n # the user is not yet owner of the community (is being added)\n # therefore we cannot use `identity`\n system_identity,\n record.id,\n {\"members\": [member], \"role\": current_roles.owner_role.name},\n uow=self.uow,\n )\n\n # Invalidate the membership cache\n on_user_membership_change(identity=identity)", "def __init__(__self__, *,\n identity_pool_id: pulumi.Input[str],\n identity_provider_name: pulumi.Input[str],\n principal_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"identity_pool_id\", identity_pool_id)\n pulumi.set(__self__, \"identity_provider_name\", identity_provider_name)\n if principal_tags is not None:\n pulumi.set(__self__, \"principal_tags\", principal_tags)\n if use_defaults is not None:\n pulumi.set(__self__, \"use_defaults\", use_defaults)", 
"def add_identity(self,item_name,item_category=None,item_type=None):\r\n return DiscoIdentity(self,item_name,item_category,item_type)", "def _create_resource_provider(session, uuid):\n url = '/resource_providers'\n data = {'uuid': uuid, 'name': uuid}\n resp = session.post(url, json=data)\n if resp:\n return resp.json()['generation']\n _print('failed to create provider')\n sys.exit(1)", "def test_create_identity(self):\n pass", "def _tenant_create(self, name_length=10, **kwargs):\n name = self._generate_random_name(length=name_length)\n return self.admin_clients(\"keystone\").tenants.create(name, **kwargs)", "def create_identity(self, global_time=None):\n meta = self._community.get_meta_message(u\"dispersy-identity\")\n\n if global_time == None:\n global_time = self.claim_global_time()\n\n return meta.impl(authentication=(self._my_member,), distribution=(global_time,))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new MFA TOTP device for the user. A user can have one MFA TOTP device.
def create_mfa_totp_device(self, user_id, **kwargs):
    resource_path = "/users/{userId}/mfaTotpDevices"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="MfaTotpDevice")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="MfaTotpDevice")
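A hedged usage sketch for the device-creation call above; the user OCID is a placeholder, and the seed attribute shown is how the OCI response exposes the TOTP secret used to configure an authenticator app:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder OCID; substitute a real user OCID from your tenancy.
user_ocid = "ocid1.user.oc1..exampleuniqueID"

mfa_response = identity.create_mfa_totp_device(user_ocid)
totp_device = mfa_response.data
print(totp_device.id)    # OCID of the new MFA TOTP device
print(totp_device.seed)  # TOTP seed, available in the create response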
[ "def create_drf_token(self):\n Token.objects.get_or_create(user=self.user)", "def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"activate_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")", "def createToken(self, props):\n\n conn = nimble.getConnection()\n result = conn.runPythonModule(\n CreateToken,\n uid=props['uid'],\n props=props,\n runInMaya=True)\n\n if result.payload.get('error'):\n print('Error in createToken:', result.payload.get('message'))\n return False", "def create_user(user):\n create_edx_user(user)\n create_edx_auth_token(user)", "def create(self, validated_data):\n return MFAMethod.objects.get_or_create(\n user=self.user,\n name=self.context['name'],\n defaults={\n 'secret': create_secret(),\n 'is_active': False,\n }\n )", "def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)", "def Register(cls, client, user_id, device_dict, is_first=True):\r\n assert 'device_id' in device_dict, device_dict\r\n\r\n device = yield gen.Task(Device.Query,\r\n client,\r\n user_id,\r\n device_dict['device_id'],\r\n None,\r\n must_exist=False)\r\n if device is None:\r\n device = Device.Create(user_id=user_id, timestamp=util.GetCurrentTimestamp(), **device_dict)\r\n else:\r\n device.UpdateFields(**device_dict)\r\n\r\n yield gen.Task(device.Update, client)\r\n\r\n # If this is the first mobile device to be registered, then turn turn off email alerting\r\n # and turn on full push alerting to mobile 
devices.\r\n if is_first:\r\n settings = AccountSettings.CreateForUser(user_id,\r\n email_alerts=AccountSettings.EMAIL_NONE,\r\n sms_alerts=AccountSettings.SMS_NONE,\r\n push_alerts=AccountSettings.PUSH_ALL)\r\n yield gen.Task(settings.Update, client)\r\n\r\n raise gen.Return(device)", "def create_gateway_device(self, body=None):\r\n return self.post(self.gateway_devices_path, body=body)", "def testCreateTrialUser(self, m):\n createusers = UserManager()\n result = createusers.register_user(\"user_trial1\", \"anypassword\", \"trial\")\n self.assertIsNotNone(result)", "def create_teacher(username, password, email, preferred_language,skype_id,name, phone_number, country,availability):\n person.create_person(username,password,email,preferred_language,skype_id,name,phone_number,country)\n teacher_account_id = person.get_last()\n query = 'INSERT INTO teacher VALUES( %s,%s );'\n args = (teacher_account_id, availability)\n database.connection.save_data(query, args)", "def create_control_device(self,user):\n if not ControlDevice.objects.filter(user=user).exists():\n response = self._aliyun.register_control_device()\n print('Aliyun response is ')\n print(response)\n if response is not None:\n control_device = ControlDevice(\n user=user,\n product_name='KessK_Controllor',\n device_name=response['DeviceName'],\n product_key=response['ProductKey'],\n device_secret=response['DeviceSecret'],\n )\n control_device.save()\n return ControlDevice.objects.get(user=user)", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_device(self, app_name='FooBar', device_type='Raspberry Pi 2'):\n\n app = self.balena.models.application.create(app_name, device_type, self.default_organization['id'])\n return app, self.balena.models.device.register(app['id'], self.balena.models.device.generate_uuid())", "def user(self, user_token, user_device=None):\n self.set('user', user_token)\n self.set('device', user_device)", "def register_user_device(username: str, password: str, mac_address: str, email: Optional[str] = None) -> \\\n Union[str, Token]:\n ret = register_user(username, password, email)\n if isinstance(ret, str):\n return ret\n else:\n user_id = ret\n token, device_id = _add_update_device(user_id, mac_address)\n client_logger_security().info(f\"Successfully added new device: user_id={user_id}, device_id={device_id}\")\n _set_user_authenticated(user_id, device_id)\n return token", "def create_user(child_name,child_id,app_id):\n print inspect.stack()[0][3]\n uri = backendAPI + \"/addkid\"\n payload = {}\n payload[\"rollnumber\"] = child_id\n payload[\"name\"] = child_name\n payload[\"deviceid\"] = app_id\n payload[\"score\"] = \"0\"\n payload = json.dumps(payload)\n headers = {'content-type': \"application/json\"}\n response = requests.request(\"POST\", uri, data=payload, headers=headers)\n print response.text\n if response.json()[\"deviceid\"] == app_id:\n return True # set to true for testing & boilerplate\n else:\n return False", "def add_my_device(self, device_id):\n method = \"POST\"\n url = \"/v1/user/addmydevice\"\n d = {\"deviceid\": device_id}\n return self.xda.requests.basic_request(method, url, body=d)", "def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n 
dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new network source in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the network source, which must be unique across all network sources in your tenancy, and cannot be changed. You can use this name or the OCID when writing policies that apply to the network source. For more information about policies, see `How Policies Work`__. You must also specify a description for the network source (although it can be an empty string). It does not
def create_network_source(self, create_network_source_details, **kwargs):
    resource_path = "/networkSources"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_network_source got unknown kwargs: {!r}".format(extra_kwargs))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_network_source_details,
            response_type="NetworkSources")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_network_source_details,
            response_type="NetworkSources")
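A minimal sketch of calling the network-source operation above, assuming the OCI Python SDK; the name, description, and CIDR range are illustrative placeholders:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# public_source_list takes the CIDR blocks allowed to sign in; values are placeholders.
details = oci.identity.models.CreateNetworkSourceDetails(
    compartment_id=config["tenancy"],    # must be the tenancy OCID
    name="corp-network-source",          # unique in the tenancy, cannot be changed later
    description="Allow sign-ins from the corporate VPN range",
    public_source_list=["203.0.113.0/24"])

network_source = identity.create_network_source(details).data
print(network_source.id)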
[ "def new_source(self, name):\n params = {\"name\": name}\n return JSONRPCRequest(self, \"newSource\", params)", "def network_create(ctx, name, blueprint):\n network = ctx.obj['CLIENT'].network.create(name, blueprint)\n click.echo('Created network: %s' % network.name)", "def source_network(self, action, name=\"\", literal=None):\n # using dict() as default value is dangerous here, any thoughts/workarounds on this?\n logging.debug(\"In source_network() for AccessRules class.\")\n if literal and name != \"\":\n raise ValueError(\n \"Only one of literals or name (object name) should be set while creating a source network\"\n )\n\n if not hasattr(self, \"sourceNetworks\"):\n self.sourceNetworks = {\"objects\": [], \"literals\": {}}\n\n if action == \"add\":\n if literal:\n type_ = get_networkaddress_type(literal)\n self.sourceNetworks[\"literals\"][literal] = type_\n logging.info(\n f'Adding literal \"{literal}\" of type \"{type_}\" to sourceNetworks for this AccessRules.'\n )\n else:\n ipaddresses_json = NetworkAddresses(fmc=self.fmc).get()\n networkgroup_json = NetworkGroups(fmc=self.fmc).get()\n fqdns_json = FQDNS(fmc=self.fmc).get()\n items = (\n ipaddresses_json.get(\"items\", [])\n + networkgroup_json.get(\"items\", [])\n + fqdns_json.get(\"items\", [])\n )\n new_net = None\n for item in items:\n if item[\"name\"] == name:\n new_net = {\n \"name\": item[\"name\"],\n \"id\": item[\"id\"],\n \"type\": item[\"type\"],\n }\n break\n if new_net is None:\n logging.warning(\n f'Network \"{name}\" is not found in FMC. Cannot add to sourceNetworks.'\n )\n else:\n if \"sourceNetworks\" in self.__dict__:\n # thus either some objects are already present in sourceNetworks,\n # or only literals are present in sourceNetworks\n if \"objects\" in self.__dict__[\"sourceNetworks\"]:\n # some objects are already present\n duplicate = False\n # see if its a duplicate or not. 
If not, append to the list of\n # existing objects in sourceNetworks\n for obj in self.sourceNetworks[\"objects\"]:\n if obj[\"name\"] == new_net[\"name\"]:\n duplicate = True\n break\n if not duplicate:\n self.sourceNetworks[\"objects\"].append(new_net)\n logging.info(\n f'Adding \"{name}\" to sourceNetworks for this AccessRules.'\n )\n else:\n # this means no objects were present in sourceNetworks,\n # and sourceNetworks contains literals only\n self.sourceNetworks.update({\"objects\": [new_net]})\n # So update the sourceNetworks dict which contained 'literals' key initially\n # to have a 'objects' key as well\n logging.info(\n f'Adding \"{name}\" to sourceNetworks for this AccessRules.'\n )\n else:\n # None of literals or objects are present in sourceNetworks,\n # so initialize it with objects and update the provided object\n self.sourceNetworks = {\"objects\": [new_net]}\n logging.info(\n f'Adding \"{name}\" to sourceNetworks for this AccessRules.'\n )\n elif action == \"remove\":\n if \"sourceNetworks\" in self.__dict__:\n if name != \"\":\n # an object's name has been provided to be removed\n objects = []\n for obj in self.sourceNetworks[\"objects\"]:\n if obj[\"name\"] != name:\n objects.append(obj)\n if len(objects) == 0:\n # it was the last object which was deleted now\n del self.sourceNetworks\n logging.info(\n f'Removed \"{name}\" from sourceNetworks for this AccessRules'\n )\n logging.info(\n \"All Source Networks removed from this AccessRules object.\"\n )\n else:\n self.sourceNetworks[\"objects\"] = objects\n logging.info(\n f'Removed \"{name}\" from sourceNetworks for this AccessRules.'\n )\n else:\n # a literal value has been provided to be removed\n type_ = self.sourceNetworks[\"literals\"].get(literal)\n if type_:\n self.sourceNetworks[\"literals\"].pop(literal)\n logging.info(\n f'Removed literal \"{literal}\" of type '\n f'\"{type_}\" from sourceNetworks for this AccessRules.'\n )\n else:\n logging.info(\n f'Unable to removed literal \"{literal}\" from sourceNetworks as it was not found'\n )\n else:\n logging.info(\n \"sourceNetworks doesn't exist for this AccessRules. 
Nothing to remove.\"\n )\n elif action == \"clear\":\n if \"sourceNetworks\" in self.__dict__:\n del self.sourceNetworks\n logging.info(\n \"All Source Networks removed from this AccessRules object.\"\n )", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def _send_create_network_request(self, context, network):\n LOG.debug(_('_send_create_network_request: %s'), network['id'])\n profile = self.get_network_profile(context,\n network[n1kv_profile.PROFILE_ID])\n n1kvclient = n1kv_client.Client()\n if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VXLAN:\n n1kvclient.create_bridge_domain(network)\n n1kvclient.create_network_segment(network, profile)", "def post_new_source(self, src_name, version_id):\n body = {\n \"name\": src_name,\n \"technicalName\": src_name,\n \"type\": \"NonRelational\",\n \"description\": \"This is a description\",\n \"summary\": \"This is a summary\",\n \"tags\": [\"RGPD\"],\n \"children\": []\n }\n response = self.post_api(\"https://api.datagalaxy.com/v2/sources/\" + version_id, body)\n return response", "def create_network(self, context, network):\n (network_type, physical_network,\n segmentation_id) = self._process_provider_create(context,\n network['network'])\n self._add_dummy_profile_only_if_testing(network)\n profile_id = self._process_network_profile(context, network['network'])\n LOG.debug(_('create network: profile_id=%s'), profile_id)\n session = context.session\n with session.begin(subtransactions=True):\n if not network_type:\n # tenant network\n (physical_network, network_type, segmentation_id,\n multicast_ip) = n1kv_db_v2.alloc_network(session,\n profile_id)\n LOG.debug(_('Physical_network %(phy_net)s, '\n 'seg_type %(net_type)s, '\n 'seg_id %(seg_id)s, '\n 'multicast_ip %(multicast_ip)s'),\n {'phy_net': physical_network,\n 'net_type': network_type,\n 'seg_id': segmentation_id,\n 'multicast_ip': multicast_ip})\n if not segmentation_id:\n raise q_exc.TenantNetworksDisabled()\n else:\n # provider network\n if network_type == c_const.NETWORK_TYPE_VLAN:\n network_profile = self.get_network_profile(context,\n profile_id)\n seg_min, seg_max = self._get_segment_range(\n network_profile['segment_range'])\n if not seg_min <= segmentation_id <= seg_max:\n raise cisco_exceptions.VlanIDOutsidePool\n n1kv_db_v2.reserve_specific_vlan(session,\n physical_network,\n segmentation_id)\n multicast_ip = \"0.0.0.0\"\n net = super(N1kvNeutronPluginV2, self).create_network(context,\n network)\n n1kv_db_v2.add_network_binding(session,\n net['id'],\n network_type,\n physical_network,\n segmentation_id,\n multicast_ip,\n profile_id)\n self._process_l3_create(context, net, network['network'])\n self._extend_network_dict_provider(context, net)\n self._extend_network_dict_profile(context, net)\n\n try:\n self._send_create_network_request(context, net)\n except(cisco_exceptions.VSMError,\n cisco_exceptions.VSMConnectionFailed):\n super(N1kvNeutronPluginV2, self).delete_network(context, net['id'])\n else:\n # note - exception will rollback entire transaction\n LOG.debug(_(\"Created network: %s\"), net['id'])\n return net", "def create_network(self, network_body, **kwargs):\n body = {\n \"network\" : network_body\n }\n return self._post(self.networks_path, body=body, **kwargs)", "def create_network(self):\n #Create the network\n self.network = Network(\"50.19.23.117\", 8080)", "def create_network(self, context, **kwargs):\n LOG.info(_LI('Create network started, network: %s.'), kwargs['id'])\n del kwargs['id']\n del kwargs['name']\n 
migration_ref = kwargs.pop('migration_ref')\n resource_ref = kwargs.pop('resource_ref')\n migration_ref.migration_event = 'Creating at destination'\n migration_ref.save()\n try:\n hypervisor_ref = objects.Hypervisor.get(context,\n migration_ref.destination_hypervisor)\n drv = importutils.import_object(hypervisor_ref.driver,\n hypervisor_ref=hypervisor_ref)\n drv.create_network(context, **kwargs)\n except Exception:\n migration_ref.migration_status = 'ERROR'\n migration_ref.migration_event = None\n migration_ref.save()\n raise\n migration_ref.migration_status = 'COMPLETE'\n migration_ref.migration_event = None\n migration_ref.save()\n resource_ref.migrated = True\n resource_ref.save()", "def fusion_api_create_network_set(self, body, api=None, headers=None):\n return self.network_set.create(body, api, headers)", "def create_default_network(context):\n return [{\n 'type': 'templates/network.py',\n 'name': 'fc-network',\n 'properties': {\n 'resourceName': 'network',\n 'name': 'network',\n 'projectId': '$(ref.fc-project.projectId)',\n 'autoCreateSubnetworks': True,\n # We pass the dependsOn list into the network template as a\n # parameter. Deployment Manager doesn't support dependsOn for\n # template-call nodes, so we can't have this resource itself depend on\n # the project-wide resources.\n 'dependsOn': '$(ref.fc-project.resourceNames)',\n },\n }]", "def create_network(self, context, network):\n (network_type, physical_network,\n segmentation_id) = self._process_provider_create(context,\n network['network'])\n profile_id = self._process_network_profile(context, network['network'])\n segment_pairs = None\n LOG.debug('Create network: profile_id=%s', profile_id)\n session = context.session\n with session.begin(subtransactions=True):\n if not network_type:\n # tenant network\n (physical_network, network_type, segmentation_id,\n multicast_ip) = n1kv_db_v2.alloc_network(session,\n profile_id,\n context.tenant_id)\n LOG.debug('Physical_network %(phy_net)s, '\n 'seg_type %(net_type)s, '\n 'seg_id %(seg_id)s, '\n 'multicast_ip %(multicast_ip)s',\n {'phy_net': physical_network,\n 'net_type': network_type,\n 'seg_id': segmentation_id,\n 'multicast_ip': multicast_ip})\n if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT:\n segment_pairs = (\n self._parse_multi_segments(context, network['network'],\n n1kv.SEGMENT_ADD))\n LOG.debug('Seg list %s ', segment_pairs)\n elif network_type == c_const.NETWORK_TYPE_TRUNK:\n network_profile = self.get_network_profile(context,\n profile_id)\n segment_pairs = (\n self._parse_trunk_segments(context, network['network'],\n n1kv.SEGMENT_ADD,\n physical_network,\n network_profile['sub_type']\n ))\n LOG.debug('Seg list %s ', segment_pairs)\n else:\n if not segmentation_id:\n raise n_exc.TenantNetworksDisabled()\n else:\n # provider network\n if network_type == c_const.NETWORK_TYPE_VLAN:\n network_profile = self.get_network_profile(context,\n profile_id)\n seg_min, seg_max = self._get_segment_range(\n network_profile['segment_range'])\n if not seg_min <= segmentation_id <= seg_max:\n raise cisco_exceptions.VlanIDOutsidePool()\n n1kv_db_v2.reserve_specific_vlan(session,\n physical_network,\n segmentation_id)\n multicast_ip = \"0.0.0.0\"\n net = super(N1kvNeutronPluginV2, self).create_network(context,\n network)\n n1kv_db_v2.add_network_binding(session,\n net['id'],\n network_type,\n physical_network,\n segmentation_id,\n multicast_ip,\n profile_id,\n segment_pairs)\n self._process_l3_create(context, net, network['network'])\n self._extend_network_dict_provider(context, 
net)\n self._extend_network_dict_profile(context, net)\n try:\n if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT:\n self._send_add_multi_segment_request(context, net['id'],\n segment_pairs)\n else:\n self._send_create_network_request(context, net, segment_pairs)\n except(cisco_exceptions.VSMError,\n cisco_exceptions.VSMConnectionFailed):\n with excutils.save_and_reraise_exception():\n self._delete_network_db(context, net['id'])\n else:\n LOG.debug(\"Created network: %s\", net['id'])\n return net", "def copy_network(self, source_network, new_network):\n vals = self.value_query(\"SELECT studyid, node_num, edge_num \"\n \"FROM networks WHERE networkID=%s;\",\n values=(source_network,), fetch=True)\n self.value_query(\"INSERT INTO networks (networkID, studyID, node_num, edge_num) \"\n \"VALUES (%s, %s, %s, %s)\", values=(new_network,) + vals[0])\n edges = self.value_query(\"SELECT source, target, weight FROM edges \"\n \"WHERE networkID=%s;\", values=(source_network,), fetch=True)\n self.value_query(\"INSERT INTO edges (networkID, source, target, weight) \"\n \"VALUES (%s, %s, %s, %s)\",\n values=[(new_network,) + edge for edge in edges])", "def _send_create_network_profile_request(self, context, profile):\n LOG.debug('_send_create_network_profile_request: %s', profile['id'])\n n1kvclient = n1kv_client.Client()\n n1kvclient.create_network_segment_pool(profile, context.tenant_id)", "def createNetwork(self, networkType):\n\n network = None\n\n if networkType == ISOLATED_NETWORK:\n try:\n network = Network.create(\n self.apiclient,\n self.services[\"isolated_network\"],\n networkofferingid=self.isolated_network_offering.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n zoneid=self.zone.id)\n self.cleanup.append(network)\n except Exception as e:\n self.fail(\"Isolated network creation failed because: %s\" % e)\n\n elif networkType == SHARED_NETWORK:\n physical_network, vlan = get_free_vlan(self.api_client, self.zone.id)\n\n # create network using the shared network offering created\n self.services[\"shared_network\"][\"acltype\"] = \"domain\"\n self.services[\"shared_network\"][\"vlan\"] = vlan\n self.services[\"shared_network\"][\n \"networkofferingid\"] = self.shared_network_offering.id\n self.services[\"shared_network\"][\n \"physicalnetworkid\"] = physical_network.id\n\n self.services[\"shared_network\"] = setSharedNetworkParams(\n self.services[\"shared_network\"])\n\n try:\n network = Network.create(\n self.api_client,\n self.services[\"shared_network\"],\n networkofferingid=self.shared_network_offering.id,\n zoneid=self.zone.id)\n self.cleanup.append(network)\n except Exception as e:\n self.fail(\"Shared Network creation failed because: %s\" % e)\n\n elif networkType == VPC_NETWORK:\n self.services[\"vpc\"][\"cidr\"] = \"10.1.1.1/16\"\n self.debug(\"creating a VPC network in the account: %s\" %\n self.account.name)\n vpc = VPC.create(\n self.apiclient,\n self.services[\"vpc\"],\n vpcofferingid=self.vpc_off.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.account.domainid)\n self.cleanup.append(vpc)\n vpcs = VPC.list(self.apiclient, id=vpc.id)\n self.assertEqual(\n validateList(vpcs)[0],\n PASS,\n \"VPC list validation failed, vpc list is %s\" %\n vpcs)\n\n network = Network.create(\n self.api_client,\n self.services[\"isolated_network\"],\n networkofferingid=self.isolated_network_offering_vpc.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n zoneid=self.zone.id,\n vpcid=vpc.id,\n gateway=\"10.1.1.1\",\n 
netmask=\"255.255.255.0\")\n self.cleanup.append(network)\n return network", "def create_data_source(apiId=None, name=None, description=None, type=None, serviceRoleArn=None, dynamodbConfig=None, lambdaConfig=None, elasticsearchConfig=None, httpConfig=None, relationalDatabaseConfig=None):\n pass", "def create_network_profile(self, body=None):\r\n return self.post(self.network_profiles_path, body=body)", "def _send_create_network_request(self, context, network, segment_pairs):\n LOG.debug('_send_create_network_request: %s', network['id'])\n profile = self.get_network_profile(context,\n network[n1kv.PROFILE_ID])\n n1kvclient = n1kv_client.Client()\n if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY:\n n1kvclient.create_bridge_domain(network, profile['sub_type'])\n if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK:\n self._populate_member_segments(context, network, segment_pairs,\n n1kv.SEGMENT_ADD)\n network['del_segment_list'] = []\n if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY:\n encap_dict = {'name': (network['name'] +\n c_const.ENCAPSULATION_PROFILE_SUFFIX),\n 'add_segment_list': (\n self._get_encap_segments(context,\n segment_pairs)),\n 'del_segment_list': []}\n n1kvclient.create_encapsulation_profile(encap_dict)\n n1kvclient.create_network_segment(network, profile)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a subscription to a region for a tenancy.
def create_region_subscription(self, create_region_subscription_details, tenancy_id, **kwargs):
    resource_path = "/tenancies/{tenancyId}/regionSubscriptions"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_region_subscription got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "tenancyId": tenancy_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_region_subscription_details,
            response_type="RegionSubscription")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_region_subscription_details,
            response_type="RegionSubscription")
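A short usage sketch for the region-subscription call above, assuming the OCI Python SDK; the region key "PHX" is only an example, and the tenancy OCID is taken from the local config:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Subscribe the tenancy to an additional region; "PHX" is illustrative.
details = oci.identity.models.CreateRegionSubscriptionDetails(region_key="PHX")
subscription = identity.create_region_subscription(details, config["tenancy"]).data
print(subscription.region_name, subscription.status)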
[ "def create_subscription(self):\n\n self.clear_subscriptions()\n\n # creating new subscription\n r = self.fitbit_service.post('http://api.fitbit.com/1/user/-/apiSubscriptions/%s.json' % self.userid, data={}, header_auth=True)\n logging.info('Adding new subscription for user %s. The code: %s Message: %s', self.userid, r.status_code, r.text)", "def test_create_a_new_subscription(self):\n pass", "def test_create_subscription(self):\n pass", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_create_endpoint_with_region(self):\n ref = self.new_endpoint_ref(service_id=self.service_id)\n ref[\"region\"] = uuid.uuid4().hex\n ref.pop('region_id')\n self.post('/endpoints', body={'endpoint': ref}, expected_status=201)\n # Make sure the region is created\n self.get('/regions/%(region_id)s' % {\n 'region_id': ref[\"region\"]})", "def _create_subscription(self):\n try:\n self.client.create_subscription(\n name=self.subscription_path, topic=self.topic_path\n )\n except NotFound:\n # suitable topic does not exist in the Pitt-Google project\n raise ValueError(\n (\n f\"A subscription named {self.subscription_name} does not exist\"\n \"in the Google Cloud Platform project \"\n f\"{settings.GOOGLE_CLOUD_PROJECT}, \"\n \"and one cannot be automatically create because Pitt-Google \"\n \"does not publish a public topic with the same name.\"\n )\n )\n else:\n self._log_and_print(f\"Created subscription: {self.subscription_path}\")", "def create_subscription(connection, project_id, body, fields=None, error_msg=None):\n return connection.post(\n url=f'{connection.base_url}/api/subscriptions',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n json=body,\n )", "def create_subscription(self, query):\n pass", "def list_region_subscriptions(self, tenancy_id, **kwargs):\n resource_path = \"/tenancies/{tenancyId}/regionSubscriptions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_region_subscriptions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tenancyId\": tenancy_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[RegionSubscription]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[RegionSubscription]\")", "def do_create_subscription(csp: CloudProviderInterface, environment_id=None):\n environment = Environments.get(environment_id)\n payload = build_subscription_payload(environment)\n try:\n csp.create_subscription(payload)\n except GeneralCSPException as e:\n app.logger.warning(\n \"Unable to create subscription for 
environment %s.\", environment.id,\n )\n raise e", "def subscription_create(transport, request, queue_name, subscription_data):\n\n request.operation = 'subscription_create'\n request.params['queue_name'] = queue_name\n request.content = json.dumps(subscription_data)\n resp = transport.send(request)\n\n return resp.deserialized_content", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def create_subscription_in_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.create.subscription_does_not_exist\")\n return\n if subscription.status != QuerySubscription.Status.CREATING.value:\n metrics.incr(\"snuba.subscriptions.create.incorrect_status\")\n return\n if subscription.subscription_id is not None:\n metrics.incr(\"snuba.subscriptions.create.already_created_in_snuba\")\n # This mostly shouldn't happen, but it's possible that a subscription can get\n # into this state. Just attempt to delete the existing subscription and then\n # create a new one.\n try:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n except SnubaError:\n logger.exception(\"Failed to delete subscription\")\n\n subscription_id = _create_in_snuba(subscription)\n subscription.update(\n status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id\n )", "def test_create_subscription_template(self):\n pass", "def add_subscription():\n\n # get Destination, EventTypes and Context from post\n # generate subscription uuid\n # add subscription in subscriptions_by_type\n # add subscription in all_subscriptions\n\n try:\n body = request.get_json()\n destination = body[\"Destination\"]\n\n if not validators.url(destination):\n abort(status.HTTP_400_BAD_REQUEST,\n \"Destination must be an URI.\")\n\n event_types = body[\"EventTypes\"]\n\n if not event_types:\n abort(status.HTTP_400_BAD_REQUEST,\n \"EventTypes cannot be empty.\")\n\n context = body.get(\"Context\")\n except KeyError:\n error_message = \"Invalid JSON key. The JSON request body \" \\\n \"must have the keys Destination and EventTypes. \" \\\n \"The Context is optional.\"\n abort(status.HTTP_400_BAD_REQUEST, error_message)\n\n subscription_id = str(uuid.uuid1())\n\n try:\n # Build Subscription object and validates it\n sc = Subscription(subscription_id, destination,\n event_types, context)\n except ValidationError:\n error_message = \"Invalid EventType. 
The EventTypes are \" \\\n \"StatusChange, ResourceUpdated, ResourceAdded, \" \\\n \"ResourceRemoved and Alert.\"\n abort(status.HTTP_400_BAD_REQUEST, error_message)\n\n for event_type in sc.get_event_types():\n util.get_subscriptions_by_type()[event_type][subscription_id] = sc\n\n util.get_all_subscriptions()[subscription_id] = sc\n\n # Build redfish json\n json_str = sc.serialize()\n\n # Build response and returns\n response = Response(\n response=json_str,\n status=status.HTTP_201_CREATED,\n mimetype=\"application/json\")\n response.headers.add(\n \"Location\", \"/redfish/v1/EventService/EventSubscriptions/\"\n \"{}\".format(subscription_id))\n return response", "def post(self, request, **kwargs):\n serializer = CreateSubscriptionSerializer(data=request.data)\n\n if serializer.is_valid():\n api_key = serializer.data.get(\n \"api_key\", STRIPE_SECRET_KEY)\n try:\n customer, _created = Customer.get_or_create(\n subscriber=subscriber_request_callback(self.request)\n )\n customer.add_card(serializer.data[\"stripe_token\"])\n customer.subscribe(\n serializer.data[\"plan\"],\n serializer.data.get(\"account\", None),\n serializer.data.get(\"charge_immediately\", True),\n api_key=api_key\n )\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except InvalidRequestError as e:\n return Response(str(e), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create_event_subscription(SubscriptionName=None, SnsTopicArn=None, SourceType=None, EventCategories=None, SourceIds=None, Enabled=None, Tags=None):\n pass", "def create_subscription(project_id, topic_name, subscription_name):\n # [START pubsub_create_pull_subscription]\n from google.cloud import pubsub_v1\n\n subscriber = pubsub_v1.SubscriberClient()\n topic_path = subscriber.topic_path(project_id, topic_name)\n subscription_path = subscriber.subscription_path(\n project_id, subscription_name)\n\n subscription = subscriber.create_subscription(\n subscription_path, topic_path)\n\n print('Subscription created: {}'.format(subscription))\n # [END pubsub_create_pull_subscription]", "def add_region(parser):\n parser.add_argument('-r', '--region', default='us-east-1', help='Region to create Resources in, e.g. us-east-1')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new SMTP credential for the specified user. An SMTP credential has an SMTP user name and an SMTP password. You must specify a description for the SMTP credential (although it can be an empty string). It does not have to be unique, and you can change it anytime with
def create_smtp_credential(self, create_smtp_credential_details, user_id, **kwargs):
    resource_path = "/users/{userId}/smtpCredentials"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_smtp_credential got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_smtp_credential_details,
            response_type="SmtpCredential")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=create_smtp_credential_details,
            response_type="SmtpCredential")
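A hedged usage sketch for the SMTP-credential call above, assuming the OCI Python SDK; the user OCID and description are placeholders, and the generated user name and password come back in the create response:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder OCID; substitute a real user OCID. The description may be any string.
user_ocid = "ocid1.user.oc1..exampleuniqueID"
details = oci.identity.models.CreateSmtpCredentialDetails(
    description="SMTP credential for the notification service")

smtp_credential = identity.create_smtp_credential(details, user_ocid).data
print(smtp_credential.username)   # generated SMTP user name
print(smtp_credential.password)   # SMTP password, available in the create response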
[ "def create_credential(user_name, sitename, accountname, password):\n new_credential = Credential(user_name, sitename, accountname, password)\n return new_credential", "def create_new_credential(account,userName,password):\n new_credential = Credentials(account,userName,password)\n return new_credential", "def CreateNewSmtpUser(s):\n payload = ['adduser %s %s\\n' % (FLAGS.exploit_user, FLAGS.exploit_password),\n 'quit\\n']\n SendPayload(s, payload)\n logging.info('Created new user %s/%s' % (\n FLAGS.exploit_user, FLAGS.exploit_password))\n s.close()", "def create_credential(self, body=None):\r\n return self.post(self.credentials_path, body=body)", "def create_new_credential(account, userName, password):\n return Credentials(account, userName, password)", "def create_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n pass", "async def create_credential_offer(self, credential_definition_id) -> str:", "async def create_credential(task_id: int, credential_type: str, account: str, realm: str, credential: str,\n metadata: str = \"\", comment: str = None) -> dict:\n try:\n task = await app.db_objects.get(db_model.task_query, id=task_id)\n from app.api.credential_api import create_credential_func\n cred = {\n \"task\": task,\n \"type\": credential_type,\n \"account\": account,\n \"realm\": realm,\n \"credential\": credential,\n \"comment\": comment,\n \"metadata\": metadata\n }\n await create_credential_func(task.operator, task.callback.operation, cred)\n return {\"status\": \"success\"}\n except Exception as e:\n return {\"status\": \"error\", \"error\": str(e)}", "def create_from_credential(credential):\n if credential.external_uid:\n return\n if not credential.secret:\n return\n if not credential.email:\n return\n if not getattr(credential, 'name', None):\n credential.name = credential.text\n if not getattr(credential, 'name', None):\n credential.name = credential.org_designator\n\n auth0api = Auth0(gaetkconfig.AUTH0_DOMAIN, get_auth0_access_token())\n payload = {\n 'connection': 'DefaultDatabase',\n 'email': credential.email,\n 'password': credential.secret,\n 'user_id': credential.uid,\n 'user_metadata': {\n 'name': credential.name,\n 'nickname': 'User fuer {}'.format(credential.org_designator)\n },\n 'email_verified': True,\n 'verify_email': False,\n 'app_metadata': {\n 'org_designator': credential.org_designator,\n 'permissions': credential.permissions,\n }\n }\n newuser = None\n try:\n newuser = auth0api.users.create(payload)\n except Auth0Error as ex:\n if ex.status_code in [400, 409] and ex.message == 'The user already exists.':\n logger.info('The user already exists: %s %r %s', credential.uid, ex, payload)\n try:\n newuser = auth0api.users.get('auth0|{}'.format(credential.uid))\n except:\n logger.warn('email collision? %s', credential.uid)\n # propbably we have an E-Mail Address collision. 
This means\n # several Credentials with the same E-Mail Adresses.\n reply = auth0api.users.list(\n connection='DefaultDatabase',\n q='email:\"{}\"'.format(credential.email),\n search_engine='v2')\n if reply['length'] > 0:\n logger.info('reply=%s', reply)\n other_uid = reply['users'][0]['user_id']\n newuser = auth0api.users.get(other_uid)\n # doppelbelegung bei Auth0 notieren\n if newuser.get('app_metadata'):\n logger.debug('app_metadata=%r', newuser['app_metadata'])\n altd = newuser['app_metadata'].get('org_designator_alt', [])\n altd = list(set(altd + [credential.org_designator]))\n altu = newuser['app_metadata'].get('uid_alt', [])\n altu = list(set(altu + [credential.uid]))\n logger.warn('updating duplicate Auth0 %s %s %s %s', altd, altu, other_uid, newuser)\n auth0api.users.update(\n other_uid,\n {'app_metadata': {'org_designator_alt': altd,\n 'uid_alt': altu}})\n else:\n logger.error('%r newuser = %s %s', 'auth0|{}'.format(credential.uid), newuser, ex)\n raise\n except:\n logger.warn('payload = %s', payload)\n raise\n if newuser is None or (newuser.get('error')):\n logger.warn('reply=%s payload = %s', newuser, payload)\n raise RuntimeError('Auth0-Fehler: %s' % newuser)\n\n logger.info('new auth0 user %s', newuser)\n credential.meta['auth0_user_id'] = credential.external_uid = newuser['user_id']\n credential.put()\n return", "def MailUser(user, passwd, sender):\n\n try:\n logging.info('Mailing password to user [%s]', user)\n\n msg = MIMEMultipart('alternative')\n msg['Subject'] = 'Your Google Apps Password Has Been Changed'\n msg['From'] = sender\n msg['To'] = user\n\n text = 'Your new password is %s' % passwd\n html = \"\"\"\\\n <html>\n <head></head>\n <body>\n <p><font face=\"courier\">Your new password is %s</font></p>\n </body>\n </html>\"\"\" % passwd\n\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n msg.attach(part1)\n msg.attach(part2)\n\n smtp_client = smtplib.SMTP('localhost')\n smtp_client.sendmail(sender, user, msg.as_string())\n smtp_client.quit()\n except Exception, e:\n logging.error('Error thrown when mailing password to user [%s] error [%s] ',\n user, e)\n raise", "def Create( profile_name,\r\n host,\r\n username=None,\r\n password=None,\r\n port=26,\r\n from_name=None,\r\n from_email=None,\r\n ssl=False,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n if not from_name and not from_email:\r\n raise CommandLine.UsageException(\"'from_name' or 'from_email' must be provided\")\r\n\r\n mailer = SmtpMailer( host,\r\n username=username,\r\n password=password,\r\n port=port,\r\n from_name=from_name,\r\n from_email=from_email,\r\n ssl=ssl,\r\n )\r\n mailer.Save(profile_name)\r\n\r\n output_stream.write(\"The profile '{}' has been created.\\n\".format(profile_name))", "def create_user_credentials(storage_type, storage_id, space_name, client_ip,\n user_details):\n user_id = user_details[\"id\"]\n if user_id == \"0\":\n return PosixCredentials(0, 0)\n\n uid = gid = gen_storage_id(user_id)\n return PosixCredentials(uid, gid)", "def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. 
If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)", "def create_demo_user():\n email = click.prompt(\"Email\")\n if '@' not in email:\n email += \"@pricetracker.ml\"\n password = click.prompt(\"Password\", hide_input=True, confirmation_prompt=True)\n password_hash = bcrypt.generate_password_hash(password).decode(\"utf-8\")\n user = User(email=email, password_hash=password_hash, is_demo_user=True)\n db.session.add(user)\n db.session.commit()", "def createTPotUser(hostPort, creatorUser, creatorPwd=None, createdPwd=None):\n if creatorPwd is None:\n # try to find user password in text file if password not specified\n with open(\"passwords.txt\") as f:\n creatorPwd = findPassword(f.read(), creatorUser)\n\n userName = \"t_pot_internal\"\n # first create role to assign to user\n roleName = createTPotRole(hostPort, creatorUser, creatorPwd)\n\n # generate random password if none specified\n if createdPwd is None:\n createdPwd = \"\".join(\n secrets.choice(string.ascii_letters + string.digits) for _ in range(20)\n )\n\n userData = {\n \"roles\": [\n roleName,\n ],\n \"password\": createdPwd,\n \"full_name\": \"\",\n \"email\": \"\",\n \"metadata\": {},\n \"enabled\": True,\n }\n\n userResp = requests.post(\n f\"https://{hostPort}/_security/user/{userName}\",\n auth=(creatorUser, creatorPwd),\n json=userData,\n )\n\n try:\n userResp.raise_for_status()\n except HTTPError:\n # Usually if API request is made before elasticsearch service is ready\n raise BadAPIRequestError(\n f\"{userResp.text}\\nBad API request. See response above.\"\n )\n\n # creating the same user twice will not change anything\n if not userResp.json()[\"created\"]:\n raise NotCreatedError(f\"{userName} user not created. 
Does it already exist?\")\n\n return userName, createdPwd", "def _CreateUser(user_dict):\r\n identity_key = 'Email:%s' % user_dict['email']\r\n identity = yield gen.Task(Identity.Query, client, identity_key, None, must_exist=False)\r\n\r\n # Get existing user id and web device id, if they exist.\r\n user_id = None\r\n if identity is not None and identity.user_id is not None:\r\n user = yield gen.Task(User.Query, client, identity.user_id, None, must_exist=False)\r\n if user is not None:\r\n user_id = user.user_id\r\n webapp_dev_id = user.webapp_dev_id\r\n\r\n if user_id is None:\r\n # Allocate new user id and web device id.\r\n user_id, webapp_dev_id = yield User.AllocateUserAndWebDeviceIds(client)\r\n\r\n # Create prospective user.\r\n user, identity = yield gen.Task(User.CreateProspective,\r\n client,\r\n user_id,\r\n webapp_dev_id,\r\n identity_key,\r\n util.GetCurrentTimestamp())\r\n\r\n # Register the user.\r\n user_dict = deepcopy(user_dict)\r\n user_dict['user_id'] = user_id\r\n user = yield gen.Task(User.Register,\r\n client,\r\n user_dict,\r\n {'key': identity_key, 'authority': 'Viewfinder'},\r\n util.GetCurrentTimestamp(),\r\n rewrite_contacts=False)\r\n\r\n # Turn off email alerts.\r\n settings = AccountSettings.CreateForUser(user_id, email_alerts=AccountSettings.EMAIL_NONE)\r\n yield gen.Task(settings.Update, client)\r\n\r\n # Make this a system user so that client will not add it to contacts.\r\n yield user.MakeSystemUser(client)\r\n\r\n raise gen.Return(user)", "def create_server(host, port, uid, pwd):\r\n s = smtplib.SMTP(host, port)\r\n s.starttls()\r\n s.login(\r\n uid,\r\n pwd\r\n )\r\n return s", "def create_service_credentials(user, new_roles=None):\n tenant = config('service-tenant')\n if not tenant:\n raise Exception(\"No service tenant provided in config\")\n\n domain = None\n if get_api_version() > 2:\n domain = DEFAULT_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n if get_api_version() > 2:\n # Create account in SERVICE_DOMAIN as well using same password\n domain = SERVICE_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n return passwd", "def add_account_credentials(self, domain, username, password='PromptMe', mailbox_name='Inbox'):\n\t\ttry:\n\t\t\tif password == 'PromptMe':\n\t\t\t\tself.print_notification('input', 35, 'Password for '+ username + '@' + domain, '')\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tpassword = getpass.getpass('')\n\n\t\t\t\tif domain in self.email_credentials:\n\t\t\t\t\tself.email_credentials[domain].update({username : [password, mailbox_name]})\n\t\t\t\telse:\n\t\t\t\t\tself.email_credentials.update({domain : {username : [password, mailbox_name]}})\n\t\texcept:\n\t\t\tself.print_notification('error', 35, 'Failed to add credentails for:', username + '@' + domain)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new tag in the specified tag namespace. The tag requires either the OCID or the name of the tag namespace that will contain this tag definition. You must specify a name for the tag, which must be unique across all tags in the tag namespace and cannot be changed. The name can contain any ASCII character except the space (_) or period (.) characters. Names are case insensitive. That means, for example, \"myTag\" and \"mytag\" are not allowed in the same namespace. If you specify a name that's already in use in the tag namespace, a 409 error is returned. The tag must have a description. It does not have to be unique, and you can change it with
def create_tag(self, tag_namespace_id, create_tag_details, **kwargs): resource_path = "/tagNamespaces/{tagNamespaceId}/tags" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_tag got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "tagNamespaceId": tag_namespace_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_tag_details, response_type="Tag") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_tag_details, response_type="Tag")
[ "def create_namespace_tags(self, namespace, **kwargs):\n url = 'metadefs/namespaces/%s/tags' % namespace\n data = json.dumps(kwargs)\n resp, body = self.post(url, data)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")", "def create_namespaced_resource(namespace, body):\n api = get_api(body[\"apiVersion\"], body[\"kind\"])\n return api.create(namespace=namespace, body=body)", "def create_tag(self, entry_name, tag):\n return self.__datacatalog.create_tag(parent=entry_name, tag=tag)", "def create_tag(request, conn=None, **kwargs):\n if not request.POST:\n return {\"error\": \"need to POST\"}\n\n tag_name = request.POST.get(\"tag_name\")\n tag_desc = request.POST.get(\"tag_description\", None)\n\n tag = omero.model.TagAnnotationI()\n tag.textValue = rstring(str(tag_name))\n if tag_desc is not None:\n tag.description = rstring(str(tag_desc))\n\n tag = conn.getUpdateService().saveAndReturnObject(tag, conn.SERVICE_OPTS)\n tag = conn.getObject(\"TagAnnotation\", tag.id.val)\n\n return {'id':tag.id, 'name':tag.getTextValue(), 'desc':tag.getDescription(), 'owner':tag.getOwnerFullName()}", "def create_release_tag(self, tag_name, message=None):\n raise NotImplementedError", "def create_tag_namespace(self, create_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")", "def create_tag(name):\n name = name.strip().lower()\n tag = Tags(name)\n try:\n db_session.add(tag)\n db_session.commit()\n except exc.IntegrityError as err:\n db_session.rollback()\n return 'Tag \"%s\" has not been added - already exists: %s.' % (name, err), 'warning', None\n return 'Tag \"%s\" has been added.' 
% name, 'success', tag", "def create_or_get_tag(self, tag_name: str, *args, **kwargs):\n\n tag_data = api.create_or_get_tag(\n tag_name,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Tag(tag_data)", "def CreateTag(region, resource_id, tag_name, tag_value):\r\n ec2 = _Connect(region)\r\n ec2.create_tags([resource_id], {tag_name: tag_value})", "def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response", "def create_tag(cls, name):\n tag = cls(name = name)\n db.session.add(tag)\n db.session.commit()\n return tag", "def make_tag(tag_name, text='', tag_attr=None):\n if tag_attr is None:\n tag_attr = {}\n\n doc = xml.dom.minidom.Document()\n element = doc.createElement(tag_name)\n if tag_attr:\n for k, v in izip(list(tag_attr.keys()), list(tag_attr.values())):\n element.setAttribute(unicode(k), unicode(v))\n if text:\n text_node = doc.createTextNode(text.strip())\n element.appendChild(text_node)\n return element", "def addTag(nodeID, tag):", "def create_tag(\n self,\n ) -> Callable[[datacatalog.CreateTagRequest], Awaitable[tags.Tag]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_tag\" not in self._stubs:\n self._stubs[\"create_tag\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.datacatalog.v1.DataCatalog/CreateTag\",\n request_serializer=datacatalog.CreateTagRequest.serialize,\n response_deserializer=tags.Tag.deserialize,\n )\n return self._stubs[\"create_tag\"]", "def create_in_workspace(self, workspace, params={}, **options):\n path = \"/workspaces/%s/tags\" % (workspace)\n return self.client.post(path, params, **options)", "def add_tag(self, obj, tag_name):\r\n tag_names = parse_tag_input(tag_name)\r\n if not len(tag_names):\r\n raise AttributeError(_('No tags were given: \"%s\".') % tag_name)\r\n if len(tag_names) > 1:\r\n raise AttributeError(_('Multiple tags were given: \"%s\".') % tag_name)\r\n tag_name = tag_names[0]\r\n if settings.FORCE_LOWERCASE_TAGS:\r\n tag_name = tag_name.lower()\r\n tag, created = self.get_or_create(name=tag_name)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n TaggedItem._default_manager.get_or_create(\r\n tag=tag, content_type=ctype, object_id=obj.pk)", "def create_tag(self, new_tag):\r\n role = self._session.role\r\n \r\n if role is None:\r\n self._session.role = \"developer\"\r\n role = self._session.role\r\n\r\n args = \"release -create %s -from %s -bl %s -active -allow_parallel_check_out\" % (new_tag, self.objectname, self.objectname)\r\n self._session.role = \"build_mgr\"\r\n\r\n result = self._session.execute(\" %s\" \\\r\n % (args), Result(self._session))\r\n self._session.role = role\r\n\r\n return result.output", "def add_tag(self, tag_name):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO tag(tag) VALUES(%s)', (tag_name,))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
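For reference, a minimal usage sketch of the create_tag operation captured in the row above. It assumes the standard oci Python SDK helpers (oci.config.from_file, oci.identity.IdentityClient, and the CreateTagDetails model); the tag namespace OCID, tag name, and description are placeholder values, not part of the source row.

import oci

config = oci.config.from_file()                       # reads the default ~/.oci/config profile
identity = oci.identity.IdentityClient(config)

tag_details = oci.identity.models.CreateTagDetails(
    name="CostCenter",                                # must be unique within the namespace
    description="Cost center used for billing",      # required, but does not have to be unique
)
response = identity.create_tag(
    tag_namespace_id="ocid1.tagnamespace.oc1..exampleuniqueID",
    create_tag_details=tag_details,
)
print(response.data.id, response.data.name)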
Creates a new tag default in the specified compartment for the specified tag definition. If you specify that a value is required, a value is set during resource creation (either by the user creating the resource or another tag default). If no value is set, resource creation is blocked. If the `isRequired` flag is set to \"true\", the value is set during resource creation. If the `isRequired` flag is set to \"false\", the value you enter is set during resource creation.
def create_tag_default(self, create_tag_default_details, **kwargs): resource_path = "/tagDefaults" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_tag_default got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing), "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_tag_default_details, response_type="TagDefault") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_tag_default_details, response_type="TagDefault")
[ "def __init__(self, default=NO_DEFAULT, required=False):\n self.default = default\n self.required = required", "def register_option_pair(key, default_value):\n\n _OPTION_TEMPLATE[key] = default_value", "def createDevIDAttr(shapefileName, defaultVal):\n\n inputds = ogr.Open(shapefileName,update=True)\n if not inputds:\n sys.exit(\"Unable to open input file '{0}'\".format(shapefileName))\n\n inputlyr = inputds.GetLayer()\n\n # Create field definition(s)\n # Add input Layer Fields to the output Layer if defined in field_names arg.\n inLayerDefn = inputlyr.GetLayerDefn()\n if inLayerDefn.GetFieldIndex(cc.DEV_LAYER_ATTRIBUTE_NAME) == -1:\n print(\"\\tCreating an Attribute '{0}' in vector file '{1}'\".format(cc.DEV_LAYER_ATTRIBUTE_NAME,shapefileName))\n\n inputlyr.CreateField(ogr.FieldDefn(cc.DEV_LAYER_ATTRIBUTE_NAME, ogr.OFTInteger))\n\n for inFeature in inputlyr:\n inFeature.SetField(cc.DEV_LAYER_ATTRIBUTE_NAME,defaultVal)\n inputlyr.SetFeature(inFeature)\n\n inputds.Destroy()\n print(\"\\tCreated an Attribute '{0}' in vector file '{1}'\".format(cc.DEV_LAYER_ATTRIBUTE_NAME,shapefileName))", "def create_provisioning_template_version(templateName=None, templateBody=None, setAsDefault=None):\n pass", "def update_tag_default(self, tag_default_id, update_tag_default_details, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_default_details,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_default_details,\n response_type=\"TagDefault\")", "def fillDefaults(param_def_dict, param_dict):\n for pname, pdict in param_def_dict.items():\n if not pdict['required'] and pname not in param_dict:\n if pdict['default'] is not None:\n param_dict[pname] = pdict['default']", "def create_object_parameter_from_default(obj, default):\n values = []\n if default.enum:\n for v in DefaultParameterVl.objects.filter(parameter=default).all():\n values.append({'value' : v.value,\n 'caption' : v.caption})\n return create_object_parameter(obj, 'user', False,\n tp = default.tp,\n name=default.name,\n descr=default.descr,\n 
values=values)", "def tag(self, value):\r\n self._tag = value if value is not None else None", "def set_default_value(structure_items, structure_item_name, default_value):\n si = structure_items.get(structure_item_name, None)\n if si and si.content:\n si.content = default_value\n si.scores = [get_pay_money_probability(structure_items)]", "def define_flag(option, default_value, documentation):\n _FLAGS.append((option, default_value, documentation))\n function_template = set_dummy_function_template(option, default_value)\n exec(function_template)\n __x()", "def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response", "def __init__(self, name=None, values=None, default_value=None):\n self.swagger_types = {\n 'name': 'str',\n 'values': 'list[TagPropertyAllowedValue]',\n 'default_value': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'values': 'values',\n 'default_value': 'defaultValue'\n }\n\n self._name = name\n self._values = values\n self._default_value = default_value", "def create_tag(request, conn=None, **kwargs):\n if not request.POST:\n return {\"error\": \"need to POST\"}\n\n tag_name = request.POST.get(\"tag_name\")\n tag_desc = request.POST.get(\"tag_description\", None)\n\n tag = omero.model.TagAnnotationI()\n tag.textValue = rstring(str(tag_name))\n if tag_desc is not None:\n tag.description = rstring(str(tag_desc))\n\n tag = conn.getUpdateService().saveAndReturnObject(tag, conn.SERVICE_OPTS)\n tag = conn.getObject(\"TagAnnotation\", tag.id.val)\n\n return {'id':tag.id, 'name':tag.getTextValue(), 'desc':tag.getDescription(), 'owner':tag.getOwnerFullName()}", "def _handle_default(self, string):\n self.doc.add_default_element(string)", "def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)", "def delete_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_request_id\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing),\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = 
kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def default_arg(default):\n class DefaultArg(argparse.Action):\n def __call__(self, parser, namespace, value, option_string):\n if value is None:\n setattr(namespace, self.dest, default)\n else:\n setattr(namespace, self.dest, value)\n\n return DefaultArg", "def set_boolean_default_value(self, value=None):\n raise NotImplementedError(\n 'operation set_boolean_default_value(...) not yet implemented')", "def make_tag(tag_name, text='', tag_attr=None):\n if tag_attr is None:\n tag_attr = {}\n\n doc = xml.dom.minidom.Document()\n element = doc.createElement(tag_name)\n if tag_attr:\n for k, v in izip(list(tag_attr.keys()), list(tag_attr.values())):\n element.setAttribute(unicode(k), unicode(v))\n if text:\n text_node = doc.createTextNode(text.strip())\n element.appendChild(text_node)\n return element" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
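Likewise, a hypothetical sketch of calling the create_tag_default operation from the row above, assuming the standard oci SDK client and the CreateTagDefaultDetails model; both OCIDs and the default value are placeholders.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# The default value is applied to resources created in the compartment.
default_details = oci.identity.models.CreateTagDefaultDetails(
    compartment_id="ocid1.compartment.oc1..exampleuniqueID",
    tag_definition_id="ocid1.tagdefinition.oc1..exampleuniqueID",
    value="Finance",
)
tag_default = identity.create_tag_default(default_details).data
print(tag_default.id)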
Creates a new tag namespace in the specified compartment. You must specify the compartment ID in the request object (remember that the tenancy is simply the root compartment). You must also specify a name for the namespace, which must be unique across all namespaces in your tenancy and cannot be changed. The name can contain any ASCII character except the space (_) or period (.). Names are case insensitive. That means, for example, \"myNamespace\" and \"mynamespace\" are not allowed in the same tenancy. Once you've created a namespace, you cannot change the name. If you specify a name that's already in use in the tenancy, a 409 error is returned. You must also specify a description for the namespace. It does not have to be unique, and you can change it with
def create_tag_namespace(self, create_tag_namespace_details, **kwargs): resource_path = "/tagNamespaces" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_tag_namespace got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_tag_namespace_details, response_type="TagNamespace") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_tag_namespace_details, response_type="TagNamespace")
[ "def create_namespace(self, request):\n return self._create(request, u\"namespaces\")", "def create_namespaced_resource(namespace, body):\n api = get_api(body[\"apiVersion\"], body[\"kind\"])\n return api.create(namespace=namespace, body=body)", "def create_namespace_tags(self, namespace, **kwargs):\n url = 'metadefs/namespaces/%s/tags' % namespace\n data = json.dumps(kwargs)\n resp, body = self.post(url, data)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def create_namespace(provenance, namespace):\n provenance.add_namespace(namespace, uri=ESMVALTOOL_URI_PREFIX + namespace)", "def test_create_namespace(self):\n self.mock_object(self.client, 'send_request')\n\n self.client.create_namespace(\n fake_client.VOLUME_NAME, fake_client.NAMESPACE_NAME,\n fake_client.VOLUME_SIZE_TOTAL, {'OsType': 'linux'})\n\n path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.NAMESPACE_NAME}'\n body = {\n 'name': path,\n 'space.size': str(fake_client.VOLUME_SIZE_TOTAL),\n 'os_type': 'linux',\n }\n self.client.send_request.assert_called_once_with(\n '/storage/namespaces', 'post', body=body)", "def createNamespace(program: ghidra.program.model.listing.Program, namespacePath: List[unicode]) -> ghidra.program.model.symbol.Namespace:\n ...", "def create_namespace(node, namespace, delete_before_create=True):\n if delete_before_create:\n Namespaces.delete_namespace(node, namespace)\n\n cmd = f\"ip netns add {namespace}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n Namespaces.__namespaces.append(namespace)", "def createNamespace(self):\r\n raise NotImplementedError('Endpoint can not be used directly.')", "def create_namespaced_net_namespace(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_create_namespace(self):\n\n status, body = self.api_create_namespace(self.current_db, self.test_ns)\n self.assertEqual(True, status == self.API_STATUS['success'], body)\n\n status, body = self.api_get_namespaces(self.current_db)\n 
self.validate_get_list_response(status, body, 'Namespaces', True)\n\n status, body = self.api_get_namespace(self.current_db, self.test_ns)\n self.validate_get_namespace_response(status, body)", "def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/actions/changeCompartment\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"change_tag_namespace_compartment got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)", "def create_namespaced_namespace(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_namespace_with_http_info(body, **kwargs)\n else:\n (data) = self.create_namespaced_namespace_with_http_info(body, **kwargs)\n return data", "def create_ns(self, context, ns):\n ns_info = ns['ns']\n name = ns_info['name']\n\n if ns_info.get('nsd_template'):\n nsd_name = utils.generate_resource_name(name, 'inline')\n nsd = {'nsd': {\n 'attributes': {'nsd': ns_info['nsd_template']},\n 'description': ns_info['description'],\n 'name': nsd_name,\n 'template_source': 'inline',\n 'tenant_id': ns_info['tenant_id']}}\n ns_info['nsd_id'] = self.create_nsd(context, nsd).get('id')\n\n nsd = self.get_nsd(context, ns['ns']['nsd_id'])\n nsd_dict = yaml.safe_load(nsd['attributes']['nsd'])\n vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']\n onboarded_vnfds = vnfm_plugin.get_vnfds(context, [])\n region_name = ns_info.get('placement_attr', {}).\\\n get('region_name', None)\n\n vim_res = self.vim_client.get_vim(context, ns['ns']['vim_id'],\n region_name)\n if not ns['ns']['vim_id']:\n ns['ns']['vim_id'] = vim_res['vim_id']\n\n # TODO(phuoc): currently, create_ns function does not have\n # create_ns_pre function, that pre-defines information of a network\n # service. 
Creating ns_uuid keeps ns_id for consistency, it should be\n # provided as return value of create_ns_pre function in ns db.\n # Generate ns_uuid\n ns['ns']['ns_id'] = uuidutils.generate_uuid()\n\n # Step-1\n param_values = ns['ns']['attributes'].get('param_values', {})\n if 'get_input' in str(nsd_dict):\n self._process_parameterized_input(ns['ns']['attributes'],\n nsd_dict)\n # Step-2\n vnfds = nsd['vnfds']\n # vnfd_dict is used while generating workflow\n vnfd_dict = dict()\n for node_name, node_val in \\\n (nsd_dict['topology_template']['node_templates']).items():\n if node_val.get('type') not in vnfds:\n continue\n vnfd_name = vnfds[node_val.get('type')]\n if not vnfd_dict.get(vnfd_name):\n vnfd_dict[vnfd_name] = {\n 'id': self._get_vnfd_id(vnfd_name, onboarded_vnfds),\n 'instances': [node_name]\n }\n else:\n vnfd_dict[vnfd_name]['instances'].append(node_name)\n if not node_val.get('requirements'):\n continue\n if not param_values.get(vnfd_name):\n param_values[vnfd_name] = {}\n param_values[vnfd_name]['substitution_mappings'] = dict()\n req_dict = dict()\n requirements = node_val.get('requirements')\n for requirement in requirements:\n req_name = list(requirement.keys())[0]\n req_val = list(requirement.values())[0]\n res_name = req_val + ns['ns']['nsd_id'][:11]\n req_dict[req_name] = res_name\n if req_val in nsd_dict['topology_template']['node_templates']:\n param_values[vnfd_name]['substitution_mappings'][\n res_name] = nsd_dict['topology_template'][\n 'node_templates'][req_val]\n\n param_values[vnfd_name]['substitution_mappings'][\n 'requirements'] = req_dict\n ns['vnfd_details'] = vnfd_dict\n\n vnffgd_templates = self._get_vnffgds_from_nsd(nsd_dict)\n LOG.debug('vnffgd_templates: %s', vnffgd_templates)\n ns['vnffgd_templates'] = vnffgd_templates\n\n ns_dict = super(NfvoPlugin, self).create_ns(context, ns)\n\n super(NfvoPlugin, self).create_ns_post(\n context, ns_dict['id'], vnfd_dict, vnffgd_templates)\n return ns_dict", "def _make_namespace(node, ns, prefix, set_default=0):\n\n if prefix is not None or set_default:\n new_ns = libxml2mod.xmlNewNs(node, ns, prefix)\n else:\n new_ns = None\n return new_ns", "def create(cls, ns, name, **kwargs):\n key_name = '%s:%s' % (ns, name)\n return cls(key_name=key_name, ns=ns, name=name, **kwargs)", "def create_tag(self, tag_namespace_id, create_tag_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/tags\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, 
retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_tag_details,\n response_type=\"Tag\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_tag_details,\n response_type=\"Tag\")", "def _create_namespace(self):\n self.ocp.new_project(self.namespace)", "def create_namespace(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"create_namespace\")", "def create_namespace(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"create_namespace\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
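A similarly hedged sketch for the create_tag_namespace operation above. The tenancy OCID is read from the SDK config (the tenancy is the root compartment); the namespace name and description are placeholders.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

namespace_details = oci.identity.models.CreateTagNamespaceDetails(
    compartment_id=config["tenancy"],                 # the tenancy is the root compartment
    name="Operations",                                # unique across the tenancy, cannot be changed
    description="Tags owned by the operations team",
)
namespace = identity.create_tag_namespace(namespace_details).data
print(namespace.id)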
Deletes the specified compartment. The compartment must be empty.
def delete_compartment(self, compartment_id, **kwargs): resource_path = "/compartments/{compartmentId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_compartment got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "compartmentId": compartment_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
[ "def remove_compartment(compartment_id):\n logger = logging.getLogger(__name__)\n # Get the compartment then removes it\n compartment = Compartment.objects.get(id=compartment_id)\n try:\n with transaction.atomic():\n compartment.delete()\n except DatabaseError as remove_error:\n logger.error(remove_error)\n pass", "def removeCompartment(self, *args):\n return _libsbml.Model_removeCompartment(self, *args)", "def delete(environment_id, token):\n unit = db_session.get_session()\n environment = unit.query(models.Environment).get(environment_id)\n\n #preparing data for removal from conductor\n env = environment.description\n env['Objects'] = None\n\n data = {\n 'model': env,\n 'token': token,\n 'tenant_id': environment.tenant_id\n }\n\n rpc.engine().handle_task(data)\n\n with unit.begin():\n unit.delete(environment)", "def removeCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_removeCompartmentReference(self, *args)", "def delcomponent(self,\n context=[],\n componentid=None):\n if componentid == None:\n raise ValueError, \"delcomponent: componentid is None\"\n return jsoncall.do_call(\"delcomponent\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'componentid':componentid},\n self.connection)", "def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))", "def delete_pcb_component(self, comp_name):\n arg = [\"NAME:Selections\", \"Selections:=\", comp_name]\n\n self.modeler.oeditor.Delete(arg)\n return True", "def delete_address(self) -> object:\n self.delete_button.click()\n\n return DeletionModal(self).wait_for_component_to_be_present()", "def removeCompartmentType(self, *args):\n return _libsbml.Model_removeCompartmentType(self, *args)", "def removeCompartmentGlyph(self, *args):\n return _libsbml.Layout_removeCompartmentGlyph(self, *args)", "def delete_composed_node(cls, composed_node_uuid):\n cls.dbdriver.delete_composed_node(composed_node_uuid)", "def vertree_delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")", "def _deleteElement(self, identifier):\n self._collection.removeByIdentifier(identifier)\n return Deleted()", "def unsetCompartment(self):\n return _libsbml.CompartmentReference_unsetCompartment(self)", "def delete(self, path_segment=\"\", **kwargs):\n if path_segment == \"\":\n path = self.path\n elif path_segment.startswith('/'):\n path = path_segment\n else:\n path = self.service._abspath(self.path + path_segment)\n return self.service.delete(path, **kwargs)", "def delete(self, department_id):\n department = get_department_by_id(department_id)\n db.session.delete(department)\n db.session.commit()\n return {}, 204", "def host_storage_sector_delete(self, merkleroot):\n return self.http.post(host_constants.STORAGE_SECTOR_URL + merkleroot)", "def unsetCompartment(self):\n return _libsbml.Reaction_unsetCompartment(self)", "def _delete_environment(self, environment):\n self.clients(\"murano\").environments.delete(environment.id)\n utils.wait_for_delete(\n environment,\n update_resource=utils.get_from_manager(),\n timeout=CONF.benchmark.delete_environment_timeout,\n check_interval=CONF.benchmark.delete_environment_check_interval\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
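A minimal sketch of the delete_compartment call shown above, again assuming the standard oci SDK; the compartment OCID is a placeholder, and per the query text the compartment must already be empty.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Deletion is rejected while the compartment still contains resources.
identity.delete_compartment("ocid1.compartment.oc1..exampleuniqueID")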
Deletes the specified MFA TOTP device for the specified user.
def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs): resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id, "mfaTotpDeviceId": mfa_totp_device_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
[ "def delete_user_token(user_obj):\n Token.objects.filter(user=user_obj).delete()", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete_user(user):\n logging.debug('{CRUD_operations} BEGIN function delete_user()')\n logging.debug('{CRUD_operations} Data received: user: %s', user)\n user.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_user()')", "def delete_user(self):\n self.test_runner.run_user_delete()", "def delete_user():", "def test_delete_device_user(self):\n pass", "def delete_system_user(self):\n self.test_runner.run_system_user_delete()", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_user(self, user_id):\r\n self._client.DarkBot.Orders.delete_one({\"_id\": user_id})", "def delete_user_entitlement(self, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n self._send(http_method='DELETE',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values)", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def delete_user(self):\n User.user_details.remove(self)", "def delete_user(user_name):\n iam_user = iam_manager.delete_user(user_name)\n return", "def delete_session_tokens_for_user(user_id: UserID) -> None:\n db.session.query(DbSessionToken) \\\n .filter_by(user_id=user_id) \\\n .delete()\n db.session.commit()", "def delete_user(self):\n try:\n self.db.delete_user(self.select_user[self.columns[0]]) # columns[0] is id\n self.rmv_windows_data()\n self.show_list()\n except AttributeError:\n pass", "def delete(self):\n g.current_user.token = None\n g.current_user.save()\n\n current_app.logger.info(\n 'Deleting token for user {}.'.format(g.current_user.username))\n return '', 204", "def delete_login_user(login_user):\r\n login_user.delete()", "def delete_user_by_xng_id(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/xngId/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['xngId'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete(self, user_id):\n log.info(\"Deleting local management user '{}'\".format(user_id))\n return self.conn.post(url='vdc/users/{}/deactivate'.format(user_id))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
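A sketch of the delete_mfa_totp_device call above, also exercising the optional if_match kwarg the method accepts for optimistic concurrency; the user OCID, device OCID, and etag are placeholders.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

identity.delete_mfa_totp_device(
    user_id="ocid1.user.oc1..exampleuniqueID",
    mfa_totp_device_id="ocid1.credential.oc1..exampleuniqueID",
    if_match="example-etag",   # optional: delete only if the resource's current etag matches
)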
Deletes the specified network source.
def delete_network_source(self, network_source_id, **kwargs): resource_path = "/networkSources/{networkSourceId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_network_source got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "networkSourceId": network_source_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
[ "def delete_source(self, source_id):\n body = {\"sourceId\": source_id}\n url = f\"{self.base_url}/api/v1/sources/delete\"\n response = requests.post(url, json=body)\n return response.status_code", "def delete(self, source):\n _source = self._source_prefix+source\n assert _source in self.cache.keys()\n del self.cache[_source]", "def delete_source(self, src_name: SourceName) -> None:\n while True:\n try:\n response = self.genes.query(\n IndexName=\"src_index\",\n KeyConditionExpression=Key(\"src_name\").eq(src_name.value),\n )\n except ClientError as e:\n raise DatabaseReadException(e)\n records = response[\"Items\"]\n if not records:\n break\n with self.genes.batch_writer(\n overwrite_by_pkeys=[\"label_and_type\", \"concept_id\"]\n ) as batch:\n for record in records:\n try:\n batch.delete_item(\n Key={\n \"label_and_type\": record[\"label_and_type\"],\n \"concept_id\": record[\"concept_id\"],\n }\n )\n except ClientError as e:\n raise DatabaseWriteException(e)\n\n try:\n self.metadata.delete_item(Key={\"src_name\": src_name.value})\n except ClientError as e:\n raise DatabaseWriteException(e)", "def RemoveSource(self,source):\n self._sources.RemoveSource(source)", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def delete_data_source(DataSourceId=None):\n pass", "def _delete_data(source, database):\n # Delete source's metadata\n try:\n metadata = database.metadata.query(\n KeyConditionExpression=Key(\n 'src_name').eq(SourceName[f\"{source.upper()}\"].value)\n )\n if metadata['Items']:\n database.metadata.delete_item(\n Key={'src_name': metadata['Items'][0]['src_name']},\n ConditionExpression=\"src_name = :src\",\n ExpressionAttributeValues={\n ':src': SourceName[f\"{source.upper()}\"].value}\n )\n except ClientError as e:\n click.echo(e.response['Error']['Message'])\n\n # Delete source's data from genes table\n try:\n while True:\n response = database.genes.query(\n IndexName='src_index',\n KeyConditionExpression=Key('src_name').eq(\n SourceName[f\"{source.upper()}\"].value)\n )\n\n records = response['Items']\n if not records:\n break\n\n with database.genes.batch_writer(\n overwrite_by_pkeys=['label_and_type', 'concept_id']) \\\n as batch:\n\n for record in records:\n batch.delete_item(\n Key={\n 'label_and_type': record['label_and_type'],\n 'concept_id': record['concept_id']\n }\n )\n except ClientError as e:\n click.echo(e.response['Error']['Message'])", "def remove(source_id):\n log.info(\"Removing source_id=%s\", source_id)\n get(source_id)\n del config_manager.get_sources()[source_id]", "def remove_source(self, label):\n self.sources_order.remove(label)\n self.sources.pop(label)", "def _send_delete_network_request(self, network):\n LOG.debug(_('_send_delete_network_request: %s'), network['id'])\n n1kvclient = n1kv_client.Client()\n if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VXLAN:\n name = network['name'] + '_bd'\n n1kvclient.delete_bridge_domain(name)\n n1kvclient.delete_network_segment(network['name'])", "def _delete_network(self, network_id, timestamp):\n net_node = self.graph_db.get_node_by_uuid(network_id)\n if net_node:\n self.graph_db.delete_node(net_node, timestamp)", "def delete_source(username, id, force, token=None):\n if not force:\n click.confirm(\n \"Are you sure you want to delete {0} {1}?\".format(username, id), abort=True\n )\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r 
= requests.delete(url)\n if r.status_code == 204:\n click.echo(\"Source deleted.\")\n else:\n raise errors.TilesetsError(r.text)", "def delete_data_source(apiId=None, name=None):\n pass", "def remove_source(self):\n self.engine.remove_source()", "def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)", "def deleteSrcNode(neti:int, reai:int, srcNodeID:str):\n global stackFlag, errCode, networkSet, netSetStack, redoStack\n errCode = 0\n if neti < 0 or neti >= len(networkSet):\n errCode = -5\n else:\n r = networkSet[neti].reactions\n if reai < 0 or reai >= len(r):\n errCode = -6\n else:\n R = r[reai]\n if srcNodeID not in R.species[0]:\n errCode = -2\n\n if errCode < 0:\n raise ExceptionDict[errCode](errorDict[errCode], neti, reai, srcNodeID)\n else:\n if stackFlag:\n redoStack = TStack()\n netSetStack.push(networkSet)\n del R.species[0][srcNodeID]\n networkSet[neti].reactions[reai] = R", "def remove(self, source, graph, dest):\n return self.server.execute(self._execute_operation(\n source, graph, dest,\n ttypes.ExecuteOperationType.Remove))", "def remove_flow(self, source):\n log.debug('Remove flow from SW: {} ; dl_dest = {}'.format(self.connection.dpid, source))\n fm = of.ofp_flow_mod()\n fm.command = of.OFPFC_DELETE\n # fm.match.dl_dst = source # change this if necessary\n fm.match.dl_dst = source # change this if necessary\n self.connection.send(fm) # send flow-mod message", "def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
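A sketch of delete_network_source using the retry_strategy kwarg the method accepts, here with the SDK's built-in default strategy; the network source OCID is a placeholder.

import oci
from oci.retry import DEFAULT_RETRY_STRATEGY

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

identity.delete_network_source(
    "ocid1.networksource.oc1..exampleuniqueID",
    retry_strategy=DEFAULT_RETRY_STRATEGY,   # retry transient failures instead of raising immediately
)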
Deletes the specified SMTP credential for the specified user.
def delete_smtp_credential(self, user_id, smtp_credential_id, **kwargs): resource_path = "/users/{userId}/smtpCredentials/{smtpCredentialId}" method = "DELETE" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "delete_smtp_credential got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id, "smtpCredentialId": smtp_credential_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params)
[ "def delete_credential(self, credential):\r\n return self.delete(self.credential_path % (credential))", "def delete_credential(self):\n\n Credentials.credential_list.remove(self)", "def delete_credentials(self):\n Credentials.credential_details.remove(self)", "def delete_credential(self):\n Credentials.credential_list.remove(self)", "def delete_credential(self):\n\n Credential.credential_list.remove(self)", "def delete_credential(self, context, id):\n return remove_credential(id)", "def delete_credential(self):\n Credential.credential_list.remove(self)", "def delete_credentials(account) : \n return Credentials.delete_credentials(account)", "def remove_credential(self, authenticator_id, credential_id):\n pass", "def remove_credentials(service: str) -> None:\n\n # SQL query to remove the user servise credentials from the database\n query = f\"DELETE FROM {service}_credentials WHERE user_id=?;\"\n\n # Execute the query\n with connect(DATABASE) as db:\n db.execute(query, (session[\"user_id\"],))\n db.commit()", "def delete_credentials(self):\n Credentials.credential_list.remove(self)", "def unset_credentials(ctx, user, store):\n try:\n logger.debug(\"store={store}, user={user}\".format(store=store, user=user))\n _pycred.unset_credentials(store, user)\n except Exception as e:\n logger.debug(e, exc_info=True)\n print('Error: {msg}'.format(msg=str(e)), file=sys.stderr)\n sys.exit(1)", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def remove(ctx, all):\n # retrieving from parameter because host_info is already overwritten\n # with old password from credential file\n credentials.remove_credentials(ctx.obj['username'], all)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def test_delete_user_credentials(self):\n self.new_user_credential.saveCredentials()\n test_user_credentials = User_Credentials(\"TestPlatform\", \"TestFirst\", \"TestLast\", \"UserTest\", \"0700123654\",\n \"nm@mail.com\", \"pd123\")\n test_user_credentials.saveCredentials()\n\n self.new_user_credential.delete_account_credentials()\n self.assertEqual(len(User_Credentials.user_credential_list), 1)", "def delete_credentials(host):\n netrc_hosts = netrc.netrc().hosts\n if host in netrc_hosts:\n del netrc_hosts[host]\n write_netrc(netrc_hosts)", "def remove_mssql_credential(baseurl, token, credential_id):\n print(f\"remove mssql credential:[{credential_id}]\")\n headers.update({'Authorization': token})\n url = f\"{baseurl}/config/credentials/{credential_id}\"\n\n status_code, response_text = common.rest_request('DELETE', url, headers)\n common.validate_response(status_code, 204, response_text)", "def delete_credentials(self):\n Credentials.credentials_list.remove(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the specified tag default.
def delete_tag_default(self, tag_default_id, **kwargs):
    resource_path = "/tagDefaults/{tagDefaultId}"
    method = "DELETE"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "delete_tag_default got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "tagDefaultId": tag_default_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
[ "def remove_default(self):\n if self.default_present:\n self.removeItem(0)\n self.default_present = False", "def delete_default_value(self, attribute_name, classifier=None):\n if self.is_deleted:\n raise CException(f\"can't delete default value '{attribute_name!s}' on deleted stereotype\")\n class_path = self._get_default_value_class_path()\n if len(class_path) == 0:\n raise CException(f\"default values can only be used on a stereotype that extends metaclasses\")\n return delete_var_value(self, class_path, self.default_values_, attribute_name, VarValueKind.DEFAULT_VALUE,\n classifier)", "def delete_tag(tag):\n tag.destroy()", "def do_unset(self, arg):\n try:\n keys = arg.split()\n for key in keys:\n del self._defaults[key]\n except:\n print(\"Couldn't unset default(s) \"+arg)", "def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]", "def delete_tag(self, *tags):\r\n return TagReference.delete(self, *tags)", "def category_default_reply_delete(\n request, structure_slug, category_slug, default_reply_id, structure\n):\n category = get_object_or_404(\n TicketCategory, organizational_structure=structure, slug=category_slug\n )\n default_reply = get_object_or_404(\n TicketCategoryDefaultReply, pk=default_reply_id, ticket_category=category\n )\n messages.add_message(\n request, messages.SUCCESS, _(\n \"Risposta predefinita eliminata correttamente\")\n )\n\n # log action\n logger.info(\n \"[{}] manager of structure {}\"\n \" {} deleted a default reply\"\n \" for category {}\".format(\n timezone.localtime(), structure, request.user, category\n )\n )\n\n default_reply.delete()\n return redirect(\n \"uni_ticket:manager_category_detail\",\n structure_slug=structure_slug,\n category_slug=category_slug,\n )", "def tags_delete(self, tag, **kwds):\n return self.request('tags/delete', tag=tag, **kwds)", "def clear_default(self, name, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info.pop('default', None)", "def delete_tag(self, *tags: TagReference) -> None:\n return TagReference.delete(self, *tags)", "def ClearDefault(self):\n \n self.default = None", "def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)", "def delete_tag(key: str):\n _check_active_model_version()\n _active_model_version.delete_tag(key) # type: ignore", "def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)", "def default_tags(self, default_tags):\n\n self._default_tags = default_tags", "def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()", "def test_delete_tag(self):\n pass", "async def delete(self, ctx, *, name: str):\n if name in ['create','tag','delete','all', 'remove', 'walkthrough', 'make']:\n return await ctx.send(f'{name} is reserved name and cannot be deleted.')\n if name in self.ptags[str(ctx.author.id)]:\n self.ptags[str(ctx.author.id)].pop(name)\n self.save_settings()\n await ctx.send('Personal tag successfully deleted.')\n else:\n await ctx.send('Tag not found in your personal tags.')", "def _default_on_del(self, zkpath):\n fs.rm_safe(self.fpath(zkpath))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the authentication policy for the given tenancy. You must specify your tenant's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment).
def get_authentication_policy(self, compartment_id, **kwargs):
    resource_path = "/authenticationPolicies/{compartmentId}"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_authentication_policy got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "compartmentId": compartment_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="AuthenticationPolicy")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="AuthenticationPolicy")
[ "def get_credentials(tenant):\n print('Get credentials for: {}\\n'.format(tenant))\n path = 'secret/{}'.format(tenant)\n document = client.read(path)\n print('Credentials: {}\\n'.format(document))\n return document", "def _get_tenant_ocid(self):\n if isinstance(self._provider, oci.signer.Signer):\n return self._provider.api_key.split('/')[0]", "def get_tenant_keyring(self) -> Optional[ImaKeyring]:\n return self.keyrings.get(\"tenant_keyring\")", "def get_tenant_info(schema_name):\n with schema_context(schema_name):\n return Pharmacy.objects.filter(schema_name=schema_name).first()", "def get(self, req, policy_id):\n policy = self.rpc_client.policy_get(req.context,\n policy_id)\n\n return {'policy': policy}", "def rbac_policy_get(request, policy_id, **kwargs):\n policy = neutronclient(request).show_rbac_policy(\n policy_id, **kwargs).get('rbac_policy')\n return RBACPolicy(policy)", "def get_policy_client(self):\n return self._connection.get_client('azure.devops.released.policy.policy_client.PolicyClient')", "def client_access_policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_access_policy\")", "def get_policy(self, uid):\n for pol in self.auth:\n if pol.actor == uid:\n return pol\n\n return None", "def policies(self, request):\n policies = OtterPolicies(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return policies.app.resource()", "def get_org_policy(self, resource, constraint, fields=None,\n verb='getOrgPolicy', **kwargs):\n arguments = {'resource': resource, 'fields': fields,\n 'body': {'constraint': constraint}}\n if kwargs:\n arguments.update(kwargs)\n return self.execute_query(\n verb=verb,\n verb_arguments=arguments,\n )", "def get(self):\n policy_number = reqparse.request.args.get('policy_number')\n category = reqparse.request.args.get('category')\n\n dao = ClaimDao()\n return dao.get(policy_number=policy_number, category=category)", "def get_tenant_config(tenant_id):\n for tenant in tenants:\n if tenant['tenant_id'] == tenant_id:\n return tenant\n raise errors.BaseTapisError(\"invalid tenant id.\")", "def policy_id(bucket_name, auth):\n\n endpoint = auth_lib.get_endpoint(\"storage\", bucket_name)\n if \".\" in bucket_name:\n response = requests.get(endpoint, auth=auth, verify=False)\n else:\n response = requests.get(endpoint, auth=auth)\n\n policy_id = response.headers.get(\"x-gmt-policyid\")\n\n return policy_id", "def get_tenant_by_id(tenant_id):\n tenant = identity.Tenant.query.filter_by(id=tenant_id).first()\n if tenant:\n return tenant\n abort(404, f\"Unable to find tenant with id: {tenant_id}\")", "def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})", "def tenant_access(self) -> Optional[pulumi.Input['ServiceTenantAccessArgs']]:\n return pulumi.get(self, \"tenant_access\")", "def get_policy_name(self, password):\n plen = len(password)\n for length in reversed(self.policies):\n if plen >= length:\n return self.policies[length].get('policy_name')\n return None", "def get_tenant(key, tenant_name):\n for tenant in key.tenants.list():\n if tenant.name == tenant_name:\n return tenant\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the specified compartment's information. This operation does not return a list of all the resources inside the compartment. There is no single API operation that does that. Compartments can contain multiple types of resources (instances, block storage volumes, etc.). To find out what's in a compartment, you must call the "List" operation for each resource type and specify the compartment's OCID as a query parameter in the request. For example,
def get_compartment(self, compartment_id, **kwargs):
    resource_path = "/compartments/{compartmentId}"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_compartment got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "compartmentId": compartment_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Compartment")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Compartment")
[ "def select_compartment(config_dict, prompt):\n try:\n oci_identity = oci.identity.IdentityClient(config_dict)\n oci_compartments = oci_identity.list_compartments(config_dict['tenancy'])\n except oci.exceptions.ServiceError as e:\n print_g('*** AUTHORISATION ERROR ***')\n sys.exit(1)\n except Exception as e:\n print_g('*** ERROR *** %s' % str(e))\n _logger.error('ERROR %s', str(e), exc_info=True)\n sys.exit(1)\n for comp in oci_compartments.data:\n print_g('%4d %-30s %s' % (oci_compartments.data.index(comp), comp.name, comp.id))\n return _select_from(oci_compartments.data, prompt)", "def getCompartment(entity):\n\n # TODO - only getting the first compartment\n if 'compartment' in entity and 'displayName' in entity['compartment'][0]:\n return entity['compartment'][0]['displayName']\n return None", "def getCompartment(self, *args):\n return _libsbml.Model_getCompartment(self, *args)", "def test_comp_show_compartment(self):\n try:\n help_data = subprocess.check_output([self.iscsi_config_path, '--show']).decode('utf-8').splitlines()\n _show_res('Show compartment compatibility', help_data)\n help_data = subprocess.check_output([self.iscsi_config_path, '--show',\n '--all']).decode('utf-8').splitlines()\n _show_res('Show compartment compatibility', help_data)\n help_data = subprocess.check_output([self.iscsi_config_path, '--show',\n '--compartment', self.compartment_name]).decode('utf-8').splitlines()\n _show_res('Show compartment compatibility', help_data)\n except Exception as e:\n self.fail('oci-iscsi-config --show --compartment <name> has failed: %s' % str(e))", "def get_compartment_id(oci_cfg, compartment_name):\n identity_client = identity.IdentityClient(oci_cfg)\n result = pagination.list_call_get_all_results(\n identity_client.list_compartments,\n cfg[\"tenancy\"],\n compartment_id_in_subtree=True,\n access_level=\"ACCESSIBLE\",\n )\n for c in result.data:\n if compartment_name == c.name:\n return c\n raise Exception(\"Compartment not found.\")", "def get_compartment(config_dict, prompt):\n yn = False\n _clear()\n while not yn:\n compartment = select_compartment(config_dict, prompt)\n print_g(compartment, term=False)\n print_g('Selected compartment: %s\\n' % compartment.name)\n yn = _read_yn('Continue?', default_yn=True)\n return compartment.id", "def getCompartment(self):\n return _libsbml.CompartmentReference_getCompartment(self)", "def list_compartments(self, compartment_id, **kwargs):\n resource_path = \"/compartments\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"access_level\",\n \"compartment_id_in_subtree\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_compartments got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'access_level' in kwargs:\n access_level_allowed_values = [\"ANY\", \"ACCESSIBLE\"]\n if kwargs['access_level'] not in access_level_allowed_values:\n raise ValueError(\n \"Invalid value for `access_level`, must be one of {0}\".format(access_level_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"accessLevel\": kwargs.get(\"access_level\", missing),\n \"compartmentIdInSubtree\": kwargs.get(\"compartment_id_in_subtree\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": 
\"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")", "def resource_list(conf: OCIConfig):\n def _retrieve_resources_in_compartment(tree, region, traverse_level=1, scan_resources=False): \n logging.info('{} {}'.format('__'*traverse_level, tree['name']))\n items = tree.get(R.COMPARTMENT)\n for nested_item in [] if not items else items:\n traverse_level += 1\n scan = scan_resources or not bool(conf.compartment_filter) or nested_item.name in conf.compartment_filter\n _retrieve_resources_in_compartment(nested_item, region, traverse_level, scan_resources=scan)\n traverse_level -= 1\n if scan_resources:\n _get_network_resources(tree, conf)\n _get_bv_resources(tree, conf)\n _get_instance_resources(tree, conf)\n _get_lb_resources(tree, conf)\n _get_db_resources(tree, conf)\n _get_autonomous_resources(tree, conf)\n\n for r in conf.compartments_tree.keys():\n # logging.info(r)\n conf.workon_region = r\n logging.info(\"Resource discovery - visit compartments in {} region\".format(r))\n _init_api_client(conf)\n\n # bv_client.list_volumes('').data\n for tree in conf.compartments_tree[r]:\n scan = not bool(conf.compartment_filter) or tree.name in conf.compartment_filter\n _retrieve_resources_in_compartment(tree, r, scan_resources=scan)", "def getCompartment(self):\n return _libsbml.Species_getCompartment(self)", "def get(self, *args):\n return _libsbml.ListOfCompartmentTypes_get(self, *args)", "def getCompartmentType(self):\n return _libsbml.Compartment_getCompartmentType(self)", "def getCompartment(self):\n return _libsbml.Reaction_getCompartment(self)", "def getCompartment(self):\n return _libsbml.QualitativeSpecies_getCompartment(self)", "def compartment_tree_build(conf: OCIConfig):\n global identity_client\n identity_client = oci.identity.IdentityClient(conf.config)\n #get_regions(conf)\n tree = []\n\n def _get_nested_resources(api_list_call: identity_client.list_compartments, id: str, tree: []):\n\n elems = oci.pagination.list_call_get_all_results(api_list_call, id,compartment_id_in_subtree=False)\n for item in elems.data:\n compartment = OciCompartment(item, identity_client)\n if (conf.preserve_compartments and compartment.name in conf.preserve_compartments or\n (conf.skip_scan_preserved_resources and compartment.check_tags(conf.preserve_tags))):\n continue\n if not compartment.is_active():\n continue\n _get_nested_resources(api_list_call, compartment.id, compartment)\n tree.append(compartment)\n\n _get_nested_resources(identity_client.list_compartments, conf.tenancy, tree)\n\n return tree", "def get(self, *args):\n return _libsbml.ListOfCompartmentReferences_get(self, *args)", "def getCompartmentType(self):\n return _libsbml.MultiCompartmentPlugin_getCompartmentType(self)", "def getCompartmentType(self, *args):\n return _libsbml.Model_getCompartmentType(self, *args)", "def getCompartment(self):\n return _libsbml.MultiSpeciesType_getCompartment(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the specified MFA TOTP device for the specified user.
def get_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):
    resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id,
        "mfaTotpDeviceId": mfa_totp_device_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="MfaTotpDeviceSummary")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="MfaTotpDeviceSummary")
[ "def GetOTP(user):\r\n return _ComputeOTP(_GetUserSecret(user),\r\n long(time.time() / _GRANULARITY))", "def GetOTP(user):\n return _ComputeOTP(_GetUserSecret(user),\n long(time.time() / _GRANULARITY))", "def create_mfa_totp_device(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")", "def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def getTokenByUser(self, 
user):\n try:\n _token = None\n for token in self.getTokens().tokens:\n if str(token.user) == str(user):\n _token = token\n return _token\n except (FileNotFoundError, FileExistsError):\n return None", "def list_mfa_totp_devices(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"sort_by\",\n \"sort_order\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_mfa_totp_devices got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n if 'sort_by' in kwargs:\n sort_by_allowed_values = [\"TIMECREATED\", \"NAME\"]\n if kwargs['sort_by'] not in sort_by_allowed_values:\n raise ValueError(\n \"Invalid value for `sort_by`, must be one of {0}\".format(sort_by_allowed_values)\n )\n\n if 'sort_order' in kwargs:\n sort_order_allowed_values = [\"ASC\", \"DESC\"]\n if kwargs['sort_order'] not in sort_order_allowed_values:\n raise ValueError(\n \"Invalid value for `sort_order`, must be one of {0}\".format(sort_order_allowed_values)\n )\n\n query_params = {\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"sortBy\": kwargs.get(\"sort_by\", missing),\n \"sortOrder\": kwargs.get(\"sort_order\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[MfaTotpDeviceSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[MfaTotpDeviceSummary]\")", "def get_patient(self, user):\n pass", "def __retrieve_rt_token(user_id):\n\n slack_user = user_profile(user_id)\n if slack_user['ok']:\n username = slack_user['user']['profile'].get('email', '').split('@')[0]\n user = get_user_model().objects.filter(username=username).first()\n if user:\n prefs = UserPreferences.objects.filter(user=user).first()\n if prefs:\n if prefs.rt_token:\n cipher_suite = Fernet(settings.CRYPTO_KEY)\n return cipher_suite.decrypt(prefs.rt_token.encode('utf-8')).decode('utf-8')\n return None", "def get_user(self, user):\n return self.users[user]", "def get_token_by_userid(self, user):\n secret = False\n session = self.sessions.get_by_userid(self.to_userid(user))\n if session:\n secret = self.get_token_by_session(session.id())\n return secret", "def get_custom_jwt(user, device):\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_otp_payload(user, device)\n return jwt_encode_handler(payload)", "def is_mfa_enabled(user):\n return 
hasattr(user, 'userotp')", "def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"activate_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")", "def get_user_devices(self, userID):\n # https://api.relayr.io/users/%s/devices\n url = '{0}/users/{1}/devices'.format(self.host, userID)\n _, data = self.perform_request('GET', url, headers=self.headers)\n return data", "def build_target(self, user, device):\n tropho = device.get('trophonius')\n push = device.get('push_token')\n if tropho is not None or push is not None:\n return (\n device['id'],\n user['_id'],\n tropho,\n push,\n device.get('os'),\n )", "def get_token(self, user):\n data = {\n 'username': user['username'],\n 'password': user['password']\n }\n resp = self.client.post('/api/token/', data, format='json')\n resp_content = json.loads(resp.content.decode('utf-8'))\n return resp_content[\"access\"]", "def get(user):\n if user:\n return Member.get_by_key_name(user.user_id())", "def get_user_token(self, user_id):\n token = self._db.get(\"usertoken.%s\" % user_id)\n if token is None:\n raise UserIdNotFound(user_id)\n return token.decode(\"utf-8\")", "def _get_device(self, dev_id):\n tuya = self.hass.data[DOMAIN][TUYA_DATA]\n return tuya.get_device_by_id(dev_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the specified tenancy's information.
def get_tenancy(self, tenancy_id, **kwargs):
    resource_path = "/tenancies/{tenancyId}"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_tenancy got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "tenancyId": tenancy_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Tenancy")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Tenancy")
[ "def tenancy(self):\n return self._tenancy", "def get_tenancy(vm_):\n return config.get_cloud_config_value(\"tenancy\", vm_, __opts__, search_global=False)", "def get_tenant_info(schema_name):\n with schema_context(schema_name):\n return Pharmacy.objects.filter(schema_name=schema_name).first()", "def get_tenant_usage(self, tenant_id):\n return self._get(_quota.TenantUsage, tenant_id)", "def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result", "def get_tenant_list(conn: dict) -> dict:\n return get(conn, PCC_TENANT + \"/list\")", "def get_quotas_tenant(self, **_params):\r\n return self.get(self.quota_path % 'tenant', params=_params)", "def tenancy(self, tenancy):\n self._tenancy = tenancy", "def get_tenants(self):\n return self.get_operation(self.PREFIX_LIST[\"TENANTS\"])", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def tenant(self):\n return self._tenant", "def scan_tenancy(config: OCIConfig):\n compartment_list(config)\n resource_list(config)\n logging.info('{}'.format(pformat(config.compartments_tree)))", "def get_tenants():\n tenant_list = []\n try:\n resp_tenants = cohesity_client.tenant.get_tenants()\n except APIException as ex:\n print(\"Unable to get tenant list: %s\" % ex.context.response.raw_body)\n raise SystemExit\n for tenant in resp_tenants:\n tenant_list.append(tenant.tenant_id)\n print(\"Adding Tenant: %s\" % tenant.name)\n return tenant_list", "def get_rt_info(self, request):\n params = self.params_valid(serializer=RtIdSerializer)\n result = rt.get_databus_rt_info(params[\"rt_id\"])\n\n return Response(result)", "def get_tenant(key, tenant_name):\n for tenant in key.tenants.list():\n if tenant.name == tenant_name:\n return tenant\n\n return None", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def get_account_info(self, get=None):\n\t\tr = self.session.get(self.rest_url + '/UserAccount/ClientAndTradingAccount')\n\t\tresp = 
json.loads(r.text)\n\t\ttry:\n\t\t\tself.trading_account_id = resp['TradingAccounts'][0]['TradingAccountId']\n\t\t\tif get is not None:\n\t\t\t\treturn resp['TradingAccounts'][0][get]\n\t\t\telse:\n\t\t\t\treturn resp\n\t\texcept:\n\t\t\traise GCapiException(resp)", "def get_current_tenant():\n return getattr(_thread_locals, \"tenant\", None)", "def resources_info(self):\n data = None\n zoomeye_api = \"https://api.zoomeye.org/resources-info\"\n headers = {'Authorization': 'JWT %s' % self.token}\n resp = requests.get(zoomeye_api, headers=headers)\n if resp and resp.status_code == 200 and 'plan' in resp.json():\n data = resp.json()\n\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the specified UserGroupMembership's information.
def get_user_group_membership(self, user_group_membership_id, **kwargs):
    resource_path = "/userGroupMemberships/{userGroupMembershipId}"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_user_group_membership got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userGroupMembershipId": user_group_membership_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="UserGroupMembership")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="UserGroupMembership")
[ "def user_get_group_membership(self, group):\n try:\n return GroupMembership.objects.get(user=self, group=group)\n except GroupMembership.DoesNotExist:\n return None", "def fetch_their_members(our_group):\n\tgroup_id = our_group[\"groupId\"]\n\turl = f'{BASE_URL}/groups/{group_id}/members'\n\tparams = {'$select': 'userPrincipalName,id'}\n\treturn call_api(url, params)", "def get_membership_data_for_current_user(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/User/GetMembershipsForCurrentUser/\"))", "def get_group_information(self) -> dict[str, Any]:\r\n\r\n param = {\r\n \"includeFields\": \"groupName,groupIcon,members,mainThread,joinedTimestamp,modifiedTimestamp,isFavorite,existsNewArrival,partySessions,\"\r\n \"notificationSetting\"\r\n }\r\n\r\n try:\r\n response: dict[str, Any] = self._request_builder.get(\r\n url=f\"{BASE_PATH['gaming_lounge']}{API_PATH['group_members'].format(group_id=self.group_id)}\",\r\n params=param,\r\n ).json()\r\n\r\n return response\r\n except PSNAWPNotFound as not_found:\r\n raise PSNAWPNotFound(f\"Group ID {self.group_id} does not exist.\") from not_found", "def getMembersByGroup(group):\n \n db = connect_db()#Connect to the user groups database\n cursor = db.cursor()#Create cursor object for searching the database table\n cursor.execute(\"SELECT usersInGroup FROM userGroups WHERE groupName=?\", (group,))#Search the database for the user group id and return the members of that group\n rows = cursor.fetchall()#Store the group members in a variable\n if len(rows) == 1:#If there is only one item then the group was found\n db.close()#Close the database connection\n return rows[0][0]#Return the group members\n elif (len(rows) == 0):#If there are no items then the group is not in the table\n db.close()#Close the database\n return False#IMPLEMENT: Need to return an error message\n else:#If anything other than the above two conditions are met then something has gone very wrong\n print(\"Something has gone wrong and there are multiple results in the table\")\n db.close()\n return#Need to report the issue for system maintaince", "def get_membership_data_by_id_get(self, membershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/User/GetMembershipsById/{membershipId}/{membershipType}/\"))", "def get_groups_for_member_get(self, filter, groupType, membershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/User/{membershipType}/{membershipId}/{filter}/{groupType}/\"))", "def getMembership(config, user):\r\n\r\n seen = set()\r\n for member_of in _getMembership(config, user, seen):\r\n yield member_of\r\n\r\n # everyone is always a member of group \"all\"\r\n yield 'all'", "def getMember(self, *args):\n return _libsbml.Group_getMember(self, *args)", "def get_membersof(self, kwargs):\n group = kwargs[\"group\"]\n verbose = kwargs.get(\"verbose\", False)\n\n results = list(self.engine.query(self.engine.GROUP_DN_FILTER(group), [\"distinguishedName\", \"objectSid\"]))\n if results:\n group_dn = results[0][\"distinguishedName\"]\n else:\n error(\"Group {group} does not exists\".format(group=group))\n\n primary_group_id = 
results[0][\"objectSid\"].split('-')[-1]\n results = self.engine.query(self.engine.ACCOUNTS_IN_GROUP_FILTER(primary_group_id, group_dn))\n self.display(results, verbose)", "def get_memberships(self):\n return UnitMembership.objects.filter(unit=self).select_related(\"user\")", "def get_members_of_group_get(self, currentpage, groupId, memberType, nameSearch):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/Members/\"))", "def get_user_group(self, user: User) -> Group:\n ...", "def get(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('GET', url)", "def users_in_group(self, group_id):\n users = []\n users = self._get(('user', 'group', str(group_id)))\n for user in users:\n if 'dreamdiary.diary.user' in user['saml_permissions']:\n users.append(user)\n return users", "def list_group_members(self, token, userGroup):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n if userGroup not in dataBase['userGroups']:\n raise GroupDoesNotExistException(\"User group does not exist\")\n\n if requestUser not in dataBase['userGroups'][userGroup]['owners']:\n raise UserPermissionException(\"User is not an owner of this group\")\n owners = dataBase['userGroups'][userGroup]['owners']\n members = dataBase['userGroups'][userGroup]['members']\n return {'owners':owners, 'members':members}", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n group_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[str]] = None) -> 'UserMembershipV3':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _UserMembershipV3State.__new__(_UserMembershipV3State)\n\n __props__.__dict__[\"group_id\"] = group_id\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"user_id\"] = user_id\n return UserMembershipV3(resource_name, opts=opts, __props__=__props__)", "def view_group(request, group_id):\n users = models.UserProfile.all().order('email')\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n if group.users:\n users = models.UserProfile.get(group.users)\n else:\n users = []\n return utility.respond(request, 'admin/view_group', {'users': users})", "def get(self):\n usergroup_node = graph.find_one(\"Usergroup\",\n property_key='id',\n property_value=self.id)\n return usergroup_node" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets details on a specified work request. The workRequestId is returned in the opc-work-request-id header for any asynchronous operation in the Identity and Access Management service.
def get_work_request(self, work_request_id, **kwargs):
    resource_path = "/workRequests/{workRequestId}"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_work_request got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "workRequestId": work_request_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="WorkRequest")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="WorkRequest")
[ "def doi_info(self,doi):\n \n doi = _clean_doi(doi)\n \n url = self.BASE_URL + 'works/' + doi\n \n try:\n return self._make_get_request(url,models.work_single)\n except errors.RequestError:\n #TODO: Check for 404\n #last_response.status_code\n #TODO: Do this only if debugging is enabled\n if self.debug:\n #TODO: Also report code\n print(\"Error msg from server: \" + self.last_response.text)\n raise errors.InvalidDOI('Invalid DOI requested: ' + doi)\n \n #return self._make_get_request(url,models.Work,kwargs)", "def get_work_order_detail_by_advance_search(self):\n self.ensure_one()\n return {\n \"name\": _(\"Work Order\"),\n \"view_type\": \"form\",\n \"view_mode\": \"tree,form\",\n \"res_model\": \"fleet.vehicle.log.services\",\n \"type\": \"ir.actions.act_window\",\n \"domain\": [(\"id\", \"=\", self.work_order_id.id)]\n if self.work_order_id\n else [],\n \"context\": self._context,\n \"target\": \"current\",\n }", "def export_getCurrentExecutionOrder(self,requestName):\n\n if type(requestName) in StringTypes:\n result = requestDB._getRequestAttribute('RequestID',requestName=requestName)\n if not result['OK']:\n return result\n requestID = result['Value']\n else:\n requestID = requestName\n\n result = requestDB.getCurrentExecutionOrder(requestID)\n return result", "def getwork(self, data=None):\n if data is None:\n # Only if no data provided, it returns a WorkItem\n return WorkItem(**self.proxy.getwork())\n else:\n return self.proxy.getwork(data)", "def send_announcement_get_work_request(self):\n self.analysis_id = uuid.uuid4().hex\n while True:\n self.announce_socket.send_json(((self.analysis_id, self.work_addr),))\n try:\n return self.awthread.recv(self.work_socket, 250)\n except six.moves.queue.Empty:\n continue", "def list_work_requests(self, compartment_id, **kwargs):\n resource_path = \"/workRequests\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"resource_identifier\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_work_requests got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"resourceIdentifier\": kwargs.get(\"resource_identifier\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[WorkRequestSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[WorkRequestSummary]\")", "def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass", "def request_id(self):\n return self._request_id", "def read(self, request, wfrun_id=None, *args, **kwargs):\n try:\n client_address = request.META['REMOTE_ADDR']\n ticket = request.META.get('HTTP_MI_TICKET', '')\n if ticket:\n try:\n user, tkt64 = authenticate(ticket=ticket, 
cip=client_address)\n except Exception, e:\n pass #return rc.FORBIDDEN\n else:\n return rc.FORBIDDEN\n if wfrun_id:\n taverna_execution = TavernaExecution.objects.get(pk=wfrun_id, ticket=ticket)\n keys = ['executionstatus', 'error', 'error_msg', 'workflowId', 'endpoint', 'asConfigId', 'expiry', 'startTime', 'Finished', 'exitcode', 'stdout', 'stderr', 'outputfolder', 'is_running', 'output']\n results = {}\n for key in keys:\n results[key] = getattr(taverna_execution, key)\n return results\n return rc.BAD_REQUEST\n except Exception, e:\n client.captureException()\n return rc.INTERNAL_ERROR", "def get_request_details(_irid):\r\n # Regex to check that entered value is digits separated by -\r\n if not bool(re.match(r\"^\\d+-\\d+$\", _irid)):\r\n sys.exit(\"Interpretation request ID doesn't match the format 11111-1, please check entry\")\r\n else:\r\n # If correctly formatted split interpretation_request on '-' and allocate to request_id, request_version\r\n request_id, request_version = _irid.split('-')\r\n return request_id, request_version", "def get_current_workfile_context():\n return get_workfile_metadata(SECTION_NAME_CONTEXT, {})", "def work_order_get_result(self, wo_submit):\n\n wo_getresult_request_json = wconfig.read_config(\n env['work_order_getresult_input_file'], \"\")\n\n configure_data_output = wconfig.configure_data(\n self.action_obj, input_json=wo_getresult_request_json,\n method_name=\"WorkOrderGetResult\",\n pre_test_response=wo_submit)\n\n # submit work order get result request and retrieve response\n\n if env['test_mode'] == env['listener_string']:\n get_result_response = submit_request_listener(\n env['uri_client'], configure_data_output)\n else:\n get_result_response = workorder_getresult_sdk(\n configure_data_output, wo_getresult_request_json)\n logger.info(\"Work order get result : {}\\n \".format(\n json.dumps(get_result_response, indent=4)\n ))\n return get_result_response", "def get_review_request(self, rid):\r\n rsp = self.api_call('api/review-requests/%s/' % rid)\r\n return rsp['review_request']", "def req_id(self) -> str:\n pass", "def RetrieveWorkerInCapability(**argd):\n checkSign = argd[\"nsid\"] + \",\" + argd[\"renid\"]\n token = EncryptUtil.DecodeURLSafeBase64(argd[\"token\"])\n try:\n tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)\n except:\n tokenRet = False\n if tokenRet is False:\n return CGateway._UnauthorizedServiceResponse(token)\n flag1, ret1 = CGateway.core.RetrieveHumanWithCapability(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd[\"capabilityName\"])\n flag2, ret2 = CGateway.core.RetrieveAgentWithCapability(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd[\"capabilityName\"])\n return CGateway._DumpResponse(ret1 + ret2)", "def work_order_receipt_retrieve(self, work_order_id, id=None):\n if work_order_id is None or not is_hex(work_order_id):\n logging.error(\"Work order id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Worker id is empty or Invalid\")\n\n json_rpc_request = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"WorkOrderReceiptRetrieve\",\n \"id\": id,\n \"params\": {\n \"workOrderId\": work_order_id\n }\n }\n response = self.__uri_client._postmsg(json.dumps(json_rpc_request))\n return response", "def get_intake_detail(request, intake_csid):\n return handle_request(request, 'cspace-services/intakes/%s' % intake_csid)", "def _request_metadata(self):\n request_metadata = remote_execution_pb2.RequestMetadata()\n 
request_metadata.tool_details.tool_name = REQUEST_METADATA_TOOL_NAME\n request_metadata.tool_details.tool_version = REQUEST_METADATA_TOOL_VERSION\n\n if self._action_id:\n request_metadata.action_id = self._action_id\n if self._tool_invocation_id:\n request_metadata.tool_invocation_id = self._tool_invocation_id\n if self._correlated_invocations_id:\n request_metadata.correlated_invocations_id = self._correlated_invocations_id\n\n return request_metadata.SerializeToString()", "def worker_retrieve(self, lookup_response):\n worker_obj = worker.SGXWorkerDetails()\n configure_data_output = wconfig.configure_data(\n self.action_obj, input_json=None,\n pre_test_response=lookup_response,\n method_name=\"WorkerRetrieve\")\n logger.info('*****Worker details Updated with Worker ID***** \\\n \\n%s\\n', configure_data_output)\n if env['test_mode'] == env['listener_string']:\n retrieve_response = submit_request_listener(\n env['uri_client'], configure_data_output)\n worker_obj.load_worker(retrieve_response['result']['details'])\n retrieve_response['workerId'] = \\\n configure_data_output[\"params\"][\"workerId\"]\n else:\n retrieve_response = worker_retrieve_sdk(configure_data_output)\n\n logger.info(\"Worker Retrieved : {%s}\\n \", retrieve_response)\n return retrieve_response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the API signing keys for the specified user. A user can have a maximum of three keys. Every user has permission to use this API call for their own user ID. An administrator in your organization does not need to write a policy to give users this ability.
def list_api_keys(self, user_id, **kwargs):
    resource_path = "/users/{userId}/apiKeys"
    method = "GET"

    expected_kwargs = ["retry_strategy"]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_api_keys got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "userId": user_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="list[ApiKey]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="list[ApiKey]")
[ "def get_keys_from_user_id(self, user_id: int) -> list:\n path = os.path.join(self.api_address, 'users/{}/keys'.format(user_id))\n response = self.process_response_from_server(path)\n keys = [i[\"key\"] for i in response]\n return keys", "def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})", "def get_ssh_keys(self, user_id):\n _gu = self.get_user(user_id)\n if _gu is None:\n return []\n\n # build URL and make request\n return self._get('/users/{0}/keys'.format(_gu['id']))", "def list_keypairs(self, provider, user):\n self.check_auth(provider, user)\n user_info = self.clients[provider].get_or_create_user(user.username)\n return user_info.keys", "def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)", "def get_keys(self, user):\n try:\n ref = self.store.lookup_reference(user.keys_ref)\n assert ref.oid in self.store, \"Invalid user keys ref\"\n return PackedKeys(self.store[ref.oid].read_raw())\n except KeyError:\n raise NoKeysRef(user.name)", "def key_pair_get_all_by_user(self, context, user_id):\n return IMPL.key_pair_get_all_by_user(context, user_id)", "def key_get_user_keys(username):\n\n token = token_by_header_data(request.headers.get(\"X-Keydom-Session\"))\n\n if not token:\n req_user = None\n else:\n req_user = token.for_user\n\n if token is not None and token.has_expired:\n resp = routing.base.generate_error_response(code=403)\n resp[\"message\"] = \"Authentication token has expired. Request another.\"\n return json.dumps(resp) + \"\\n\"\n\n user = User.get(username=username)\n scope = Key.VIS_PUB # Default to lowest permission scope.\n if token and user.is_friends(req_user):\n scope = Key.VIS_PRIV\n elif user == req_user:\n scope = Key.VIS_SELF\n else:\n scope = Key.VIS_PUB\n\n user_keys = user.scoped_keys(scope)\n\n resp = routing.base.generate_base_response()\n resp[\"keys\"] = []\n resp[\"owner\"] = {\n \"username\": user.username,\n \"scope\": scope,\n }\n\n for key in user_keys:\n resp[\"keys\"].append({\n \"short_name\": key.short_name,\n \"key\": key.content,\n \"fingerprint\": key.fingerprint(),\n \"published\": str(key.published_at),\n })\n\n return json.dumps(resp) + \"\\n\"", "def key_get_keys():\n\n token = token_by_header_data(request.headers.get(\"X-Keydom-Session\"))\n\n if not token:\n resp = routing.base.generate_error_response(code=401)\n resp[\"message\"] = \"Invalid authentication token.\"\n return json.dumps(resp) + \"\\n\"\n\n if token.has_expired:\n resp = routing.base.generate_error_response(code=403)\n resp[\"message\"] = \"Authentication token has expired. 
Request another.\"\n return json.dumps(resp) + \"\\n\"\n\n user = token.for_user\n user_keys = user.scoped_keys(scope=Key.VIS_SELF)\n\n resp = routing.base.generate_bare_response()\n resp[\"keys\"] = []\n resp[\"user\"] = {\n \"username\": user.username,\n }\n\n for key in user_keys:\n resp[\"keys\"].append({\n \"short_name\": key.short_name,\n \"key\": key.content,\n \"fingerprint\": key.fingerprint(),\n \"published\": str(key.published_at),\n })\n\n return json.dumps(resp) + \"\\n\"", "def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)", "def list_customer_secret_keys(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/customerSecretKeys\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_customer_secret_keys got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[CustomerSecretKeySummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[CustomerSecretKeySummary]\")", "def api_keys(self) -> List[ApiKey]:\n api_keys = self.all_api_keys()\n my_api_keys = []\n\n if api_keys[\"id\"] == self.id:\n # This function was called on the authenticated user\n my_api_keys = api_keys[\"keys\"]\n else:\n # This function was called on a child user (authenticated as parent, only return\n # this child user's details).\n for child in api_keys[\"children\"]:\n if child.id == self.id:\n my_api_keys = child.keys\n break\n\n return my_api_keys", "def apikeys(request):\n display = get_boolean_value(request.GET.get('display', False))\n\n return render(request, 'gui/profile/profile_api_keys_list.html', {\n 'user': request.user,\n 'display_keys': display\n })", "def get_user_secret_keys(self):\n log.info('Getting all secrets for current api user (me)')\n return self.conn.get(url='object/secret-keys')", "def get_user_signatures(user_id):\n signatures = Signature().get_signatures_by_reference(str(user_id), 'user')\n return [signature.to_dict() for signature in signatures]", "def dc_user_profile_apikeys(request, username):\n user = get_edited_user(request, username)\n display = get_boolean_value(request.GET.get('display', False))\n\n return render(request, 'gui/profile/profile_api_keys_list.html', {\n 'user': user,\n 'display_keys': display\n })", "def get_s3_keys(bucket, user_keys = None):\n keys = []\n if user_keys is None:\n \t\t\t\ts3 = boto3.client('s3')\n else:\n s3 = boto3.client('s3', \n aws_access_key_id = user_keys[\"AWS_ACCESS_KEY_ID\"], 
\n aws_secret_access_key = user_keys[\"AWS_SECRET_ACCESS_KEY\"], \n region_name = user_keys[\"REGION_NAME\"]\n ) \t \n \n resp = s3.list_objects_v2(Bucket= bucket)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n return keys", "def get_api_keys(owner):\n api.get_all(owner)", "def test_get_user_api_keys(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
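A minimal usage sketch for the list_api_keys operation above, assuming the `oci` Python SDK is installed and a valid DEFAULT profile exists in ~/.oci/config; the user OCID is read from that profile, and the printed fields are standard attributes of the returned ApiKey model:

import oci

config = oci.config.from_file()  # assumes ~/.oci/config with a DEFAULT profile
identity = oci.identity.IdentityClient(config)

# The response's .data attribute holds the deserialized list[ApiKey]
for api_key in identity.list_api_keys(user_id=config["user"]).data:
    print(api_key.fingerprint, api_key.lifecycle_state)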
Lists the availability domains in your tenancy. Specify the OCID of either the tenancy or another of your compartments as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__. Note that the order of the results returned can change if availability domains are added or removed; therefore, do not create a dependency on the list order.
def list_availability_domains(self, compartment_id, **kwargs): resource_path = "/availabilityDomains" method = "GET" expected_kwargs = ["retry_strategy"] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_availability_domains got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[AvailabilityDomain]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[AvailabilityDomain]")
[ "def get_availability_domains(identity_client, compartment_id):\n result = pagination.list_call_get_all_results(\n identity_client.list_availability_domains,\n compartment_id\n )\n return result.data", "def select_availability_domain(config_dict, compartment_id):\n try:\n oci_identity = oci.identity.IdentityClient(config_dict)\n oci_availability_domains = oci_identity.list_availability_domains(compartment_id)\n except oci.exceptions.ServiceError as e:\n print_g('*** AUTHORISATION ERROR ***')\n _logger.error('Authorisation error', exc_info=True)\n sys.exit(1)\n except Exception as e:\n print_g('*** ERROR *** %s' % str(e))\n _logger.error('ERROR %s', str(e), exc_info=True)\n sys.exit(1)\n for domain in oci_availability_domains.data:\n print_g('%4d %-30s %s' % (oci_availability_domains.data.index(domain), domain.name, domain.id))\n return _select_from(oci_availability_domains.data, 'Select availability domain.')", "def list_agencies():\n domainHandler = DomainHandler()\n return domainHandler.list_agencies()", "def get_domains(self):\n return self.rest_helper(\"/domains.json\")", "def list_fault_domains(self, compartment_id, availability_domain, **kwargs):\n resource_path = \"/faultDomains\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_fault_domains got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"availabilityDomain\": availability_domain\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[FaultDomain]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[FaultDomain]\")", "def get_tenant_list(conn: dict) -> dict:\n return get(conn, PCC_TENANT + \"/list\")", "def scan_tenancy(config: OCIConfig):\n compartment_list(config)\n resource_list(config)\n logging.info('{}'.format(pformat(config.compartments_tree)))", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def get_domain_list(self):\n\t\tnow = datetime.datetime.now()\n\t\texp_from = \"%04d-%02d-%02d\" % (now.year-1, now.month, now.day)\n\t\texp_to = \"%04d-%02d-%02d\" % (now.year+15, now.month, now.day)\n\t\tdomain_list = []\n\t\tfor extension in ['.com', '.au']:\n\t\t\tresult = self.query('get_domains_by_expiredate', 'domain', { 'exp_from' : exp_from, 'exp_to' : exp_to, 'limit' : 100000, 'page' : 1, 'domain': extension})\n\t\t\t[ domain_list.append(x['name']) for x in result['attributes']['exp_domains'] ]\n\t\treturn domain_list", "def case_search_enabled_domains():\n return CaseSearchConfig.objects.filter(enabled=True).values_list('domain', flat=True)", "def list_compartments(self, compartment_id, **kwargs):\n resource_path = \"/compartments\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n 
\"access_level\",\n \"compartment_id_in_subtree\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_compartments got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'access_level' in kwargs:\n access_level_allowed_values = [\"ANY\", \"ACCESSIBLE\"]\n if kwargs['access_level'] not in access_level_allowed_values:\n raise ValueError(\n \"Invalid value for `access_level`, must be one of {0}\".format(access_level_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"accessLevel\": kwargs.get(\"access_level\", missing),\n \"compartmentIdInSubtree\": kwargs.get(\"compartment_id_in_subtree\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")", "def domain_list(self) -> 'outputs.GetReposRepoDomainListResult':\n return pulumi.get(self, \"domain_list\")", "def list_availability_zones(self):\n uri = 'os-availability-zone'\n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n return self._parse_resp(body)", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def list_zones(self, **kwargs):\n return self.client['Account'].getDomains(**kwargs)", "def availability_domain(self):\n return self._availability_domain", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def get_domains() -> List[str]:\n ret = _call_endpoint(\"v1/domains\")\n # Example response:\n # [{'createdAt': '2016-06-25T03:08:44.000Z',\n # 'domain': 'mydomain.com',\n # 'domainId': 12345678,\n # 'expirationProtected': False,\n # 'expires': '2020-06-25T03:08:44.000Z',\n # 'holdRegistrar': False,\n # 'locked': True,\n # 'nameServers': None,\n # 'privacy': False,\n # 'renewAuto': True,\n # 'renewDeadline': '2020-08-09T03:08:44.000Z',\n # 'renewable': True,\n # 'status': 'ACTIVE',\n # 'transferProtected': False},]\n domains = [d[\"domain\"] for d in ret]\n return domains" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
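A short sketch of calling list_availability_domains under the same profile assumption; the tenancy OCID from the profile serves as the root compartment ID:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# The tenancy OCID doubles as the root compartment ID
ads = identity.list_availability_domains(compartment_id=config["tenancy"]).data
print([ad.name for ad in ads])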
Lists the compartments in a specified compartment. The members of the list returned depend on the values set for several parameters. With the exception of the tenancy (root compartment), the ListCompartments operation returns only the first-level child compartments in the parent compartment specified in `compartmentId`. The list does not include any subcompartments of the child compartments (grandchildren). The parameter `accessLevel` specifies whether to return only those compartments for which the requestor has INSPECT permissions on at least one resource directly or indirectly (the resource can be in a subcompartment). The parameter `compartmentIdInSubtree` applies only when you perform ListCompartments on the tenancy (root compartment). When set to true, the entire hierarchy of compartments can be returned. To get a full list of all compartments and subcompartments in the tenancy (root compartment), set the parameter `compartmentIdInSubtree` to true and `accessLevel` to ANY. See `Where to Get the Tenancy's OCID and User's OCID`__.
def list_compartments(self, compartment_id, **kwargs): resource_path = "/compartments" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit", "access_level", "compartment_id_in_subtree" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_compartments got unknown kwargs: {!r}".format(extra_kwargs)) if 'access_level' in kwargs: access_level_allowed_values = ["ANY", "ACCESSIBLE"] if kwargs['access_level'] not in access_level_allowed_values: raise ValueError( "Invalid value for `access_level`, must be one of {0}".format(access_level_allowed_values) ) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing), "accessLevel": kwargs.get("access_level", missing), "compartmentIdInSubtree": kwargs.get("compartment_id_in_subtree", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[Compartment]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[Compartment]")
[ "def getListOfCompartments(self):\n return self.model.getListOfCompartments()", "def compartment_tree_build(conf: OCIConfig):\n global identity_client\n identity_client = oci.identity.IdentityClient(conf.config)\n #get_regions(conf)\n tree = []\n\n def _get_nested_resources(api_list_call: identity_client.list_compartments, id: str, tree: []):\n\n elems = oci.pagination.list_call_get_all_results(api_list_call, id,compartment_id_in_subtree=False)\n for item in elems.data:\n compartment = OciCompartment(item, identity_client)\n if (conf.preserve_compartments and compartment.name in conf.preserve_compartments or\n (conf.skip_scan_preserved_resources and compartment.check_tags(conf.preserve_tags))):\n continue\n if not compartment.is_active():\n continue\n _get_nested_resources(api_list_call, compartment.id, compartment)\n tree.append(compartment)\n\n _get_nested_resources(identity_client.list_compartments, conf.tenancy, tree)\n\n return tree", "def getListOfCompartments(self, *args):\n return _libsbml.Model_getListOfCompartments(self, *args)", "def select_compartment(config_dict, prompt):\n try:\n oci_identity = oci.identity.IdentityClient(config_dict)\n oci_compartments = oci_identity.list_compartments(config_dict['tenancy'])\n except oci.exceptions.ServiceError as e:\n print_g('*** AUTHORISATION ERROR ***')\n sys.exit(1)\n except Exception as e:\n print_g('*** ERROR *** %s' % str(e))\n _logger.error('ERROR %s', str(e), exc_info=True)\n sys.exit(1)\n for comp in oci_compartments.data:\n print_g('%4d %-30s %s' % (oci_compartments.data.index(comp), comp.name, comp.id))\n return _select_from(oci_compartments.data, prompt)", "def get_compartment(self, compartment_id, **kwargs):\n resource_path = \"/compartments/{compartmentId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_compartment got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"compartmentId\": compartment_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"Compartment\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"Compartment\")", "def getListOfCompartmentReferences(self, *args):\n return _libsbml.MultiCompartmentPlugin_getListOfCompartmentReferences(self, *args)", "def resource_list(conf: OCIConfig):\n def _retrieve_resources_in_compartment(tree, region, traverse_level=1, scan_resources=False): \n logging.info('{} {}'.format('__'*traverse_level, tree['name']))\n items = tree.get(R.COMPARTMENT)\n for nested_item in [] if not items else items:\n traverse_level += 1\n scan = scan_resources or not bool(conf.compartment_filter) or nested_item.name in 
conf.compartment_filter\n _retrieve_resources_in_compartment(nested_item, region, traverse_level, scan_resources=scan)\n traverse_level -= 1\n if scan_resources:\n _get_network_resources(tree, conf)\n _get_bv_resources(tree, conf)\n _get_instance_resources(tree, conf)\n _get_lb_resources(tree, conf)\n _get_db_resources(tree, conf)\n _get_autonomous_resources(tree, conf)\n\n for r in conf.compartments_tree.keys():\n # logging.info(r)\n conf.workon_region = r\n logging.info(\"Resource discovery - visit compartments in {} region\".format(r))\n _init_api_client(conf)\n\n # bv_client.list_volumes('').data\n for tree in conf.compartments_tree[r]:\n scan = not bool(conf.compartment_filter) or tree.name in conf.compartment_filter\n _retrieve_resources_in_compartment(tree, r, scan_resources=scan)", "def getListOfCompartmentTypes(self, *args):\n return _libsbml.Model_getListOfCompartmentTypes(self, *args)", "def getCompartment(self, *args):\n return _libsbml.Model_getCompartment(self, *args)", "def _update_compounds_compartments(self, reaction):\n # assumes SBMLReaction\n for cmpd in reaction.compounds_iter():\n self.compounds.add(cmpd)\n if hasattr(cmpd, \"compartment\"):\n self.compartments.add(cmpd.compartment)", "def list_policies(self, compartment_id, **kwargs):\n resource_path = \"/policies\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_policies got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Policy]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Policy]\")", "def getCompartment(self):\n return _libsbml.CompartmentReference_getCompartment(self)", "def test_comp_show_compartment(self):\n try:\n help_data = subprocess.check_output([self.iscsi_config_path, '--show']).decode('utf-8').splitlines()\n _show_res('Show compartment compatibility', help_data)\n help_data = subprocess.check_output([self.iscsi_config_path, '--show',\n '--all']).decode('utf-8').splitlines()\n _show_res('Show compartment compatibility', help_data)\n help_data = subprocess.check_output([self.iscsi_config_path, '--show',\n '--compartment', self.compartment_name]).decode('utf-8').splitlines()\n _show_res('Show compartment compatibility', help_data)\n except Exception as e:\n self.fail('oci-iscsi-config --show --compartment <name> has failed: %s' % str(e))", "def get(self, *args):\n return _libsbml.ListOfCompartmentReferences_get(self, *args)", "def get_compounds(self, ctx, params):\n # ctx is the context object\n # return variables are: out_compounds\n #BEGIN get_compounds\n 
self._check_param(params, ['compounds'])\n out_compounds = []\n for x in params['compounds']:\n id = x.split('/')[-1]\n comp = self.compounds.get(id, None)\n if comp:\n comp['aliases'] = self.comp_aliases.get(id, '')\n out_compounds.append(comp)\n #END get_compounds\n\n # At some point might do deeper type checking...\n if not isinstance(out_compounds, list):\n raise ValueError('Method get_compounds return value ' +\n 'out_compounds is not type list as required.')\n # return the results\n return [out_compounds]", "def call_list_components(self, env_name):\n\n env = self.get_environment(env_name)\n return env.get_components()", "def get_compartment_id(oci_cfg, compartment_name):\n identity_client = identity.IdentityClient(oci_cfg)\n result = pagination.list_call_get_all_results(\n identity_client.list_compartments,\n cfg[\"tenancy\"],\n compartment_id_in_subtree=True,\n access_level=\"ACCESSIBLE\",\n )\n for c in result.data:\n if compartment_name == c.name:\n return c\n raise Exception(\"Compartment not found.\")", "def get_metabolites_by_compartment(self, c_id):\n\n assert c_id in list(self.compartments.keys()), 'No such compartment: ' + c_id\n\n return [m_id for m_id, met in self.metabolites.items() if met.compartment == c_id]", "def list(self):\r\n data_path = \"%s/collaborations\" % self._basepath\r\n params = {\"f\" : \"json\",\r\n \"num\":100,\r\n 'start' : 1}\r\n res = self._portal.con.get(data_path, params)\r\n collabs = []\r\n while len(res['collaborations']) > 0:\r\n for collab in res['collaborations']:\r\n collabs.append(Collaboration(collab_manager=self,\r\n collab_id=collab['id']))\r\n res = self._portal.con.get(data_path, params)\r\n params['start'] = res['nextStart']\r\n if res['nextStart'] == -1:\r\n return collabs\r\n return collabs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
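To walk the whole compartment hierarchy described above, the page/limit parameters can be left to the SDK's pagination helper; this sketch assumes a configured ~/.oci/config profile and INSPECT permissions on the tenancy:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# compartment_id_in_subtree=True is only valid when listing on the tenancy (root compartment)
all_compartments = oci.pagination.list_call_get_all_results(
    identity.list_compartments,
    config["tenancy"],
    compartment_id_in_subtree=True,
    access_level="ANY",
).data
for compartment in all_compartments:
    print(compartment.name, compartment.lifecycle_state)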
Lists all the tags enabled for cost-tracking in the specified tenancy. For information about cost-tracking tags, see `Using Cost-tracking Tags`__.
def list_cost_tracking_tags(self, compartment_id, **kwargs): resource_path = "/tagNamespaces/actions/listCostTrackingTags" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_cost_tracking_tags got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[Tag]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[Tag]")
[ "def getAllTags(self):", "def list_tags():\n r = requests.get('http://api.greynoise.io:8888/v1/query/list')\n if r.status_code == 200:\n if 'tags' in r.json():\n return r.json()['tags']\n else:\n print(\"No tags found.\")\n else:\n return {}", "def tag_list(request, template_name='objectapp/tag_list.html'):\n tag_list = Tag.objects.usage_for_queryset(\n Gbobject.published.all(), counts=True)\n return render_to_response(template_name, {'object_list': tag_list},\n context_instance=RequestContext(request))", "def list_tags():\r\n tags = Tag.query.order_by(Tag.name).all()\r\n return render_template('tags.html', tags=tags)", "def showalltags(ctx):\n quotefile = ctx.obj['QUOTEFILE']\n\n tags = api.read_tags(quotefile)\n for tag in tags:\n print(tag)", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def list_tags():\n\n tags = Tag.query.all()\n return render_template('tags/list_tags.html', tags=tags)", "def list_all_tags(self,obs):", "def get_all_tag_badges():\n return _site.fetch(\"badges/tags\", \"badges\")", "def getTagList(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.getTagList()", "def getTags(number=None):", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def list(self):\n return self._list(url='/tags', response_key='data')", "def get_all_tags_with_weights(self):\n return list(TagCourseEntity.objects.filter(courseentity=self))", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def tags(self):\r\n return self.backend.all_tags()", "def show_tags():\n tags = Tag.query.order_by(Tag.name).all()\n return render_template('all_tags.html', tags=tags)", "def get_pool_tags():\n\t\n\tpooltags = dump_pool_tags.get_all_pooltags()\n\tprint(\"The following pooltags are in use by the binary: \")\n\tprint(pooltags)", "def getTags(self):\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
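A hedged sketch of list_cost_tracking_tags under the same profile assumption; the tag_namespace_name and name fields printed below are attributes of the Tag model the call returns:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Each returned Tag carries the namespace it belongs to
for tag in identity.list_cost_tracking_tags(compartment_id=config["tenancy"]).data:
    print(tag.tag_namespace_name, tag.name)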
Lists the secret keys for the specified user. The returned object contains the secret key's OCID, but not the secret key itself. The actual secret key is returned only upon creation.
def list_customer_secret_keys(self, user_id, **kwargs): resource_path = "/users/{userId}/customerSecretKeys" method = "GET" expected_kwargs = ["retry_strategy"] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_customer_secret_keys got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="list[CustomerSecretKeySummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="list[CustomerSecretKeySummary]")
[ "def get_user_secret_keys(self):\n log.info('Getting all secrets for current api user (me)')\n return self.conn.get(url='object/secret-keys')", "def get_keys_from_user_id(self, user_id: int) -> list:\n path = os.path.join(self.api_address, 'users/{}/keys'.format(user_id))\n response = self.process_response_from_server(path)\n keys = [i[\"key\"] for i in response]\n return keys", "def key_pair_get_all_by_user(self, context, user_id):\n return IMPL.key_pair_get_all_by_user(context, user_id)", "def list_keypairs(self, provider, user):\n self.check_auth(provider, user)\n user_info = self.clients[provider].get_or_create_user(user.username)\n return user_info.keys", "def get_ssh_keys(self, user_id):\n _gu = self.get_user(user_id)\n if _gu is None:\n return []\n\n # build URL and make request\n return self._get('/users/{0}/keys'.format(_gu['id']))", "def list_secret_kvs(self, path):\n secret_path = f\"secret/data/{path}\"\n secret = self.client.read(path=secret_path)\n\n # ignore deleted/destroyed secrets\n if secret is not None:\n return secret['data']['data']", "def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})", "def list_credentials(user):\n return Credentials.list_credentials(user)", "def secrets(self):\n return k8s_object.ListAsDictionaryWrapper(\n self._volumes,\n self._volume_class,\n value_field='secret',\n filter_func=lambda volume: volume.secret is not None)", "def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)", "def ListSecrets(self):\r\n return self._secrets.keys()", "def secrets(self):\n return k8s_object.ListAsDictionaryWrapper(\n self._m,\n self._item_class,\n key_field=self._key_field,\n value_field=self._value_field,\n filter_func=lambda mount: mount.name in self._volumes.secrets)", "def GetSecretKey(cls, user_id):\n uid = hashlib.sha256(str(user_id)).hexdigest()\n entity = ndb.Key(cls, uid).get()\n if not entity:\n entity = cls(id=uid, secret_key=GenerateRandomHexKey())\n entity.put()\n return entity.secret_key", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def list_api_keys(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/apiKeys\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_api_keys got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n 
resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[ApiKey]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[ApiKey]\")", "def keys(self):\n return [s for s in self._secrets.keys()]", "def get_keys(self, user):\n try:\n ref = self.store.lookup_reference(user.keys_ref)\n assert ref.oid in self.store, \"Invalid user keys ref\"\n return PackedKeys(self.store[ref.oid].read_raw())\n except KeyError:\n raise NoKeysRef(user.name)", "def key_get_user_keys(username):\n\n token = token_by_header_data(request.headers.get(\"X-Keydom-Session\"))\n\n if not token:\n req_user = None\n else:\n req_user = token.for_user\n\n if token is not None and token.has_expired:\n resp = routing.base.generate_error_response(code=403)\n resp[\"message\"] = \"Authentication token has expired. Request another.\"\n return json.dumps(resp) + \"\\n\"\n\n user = User.get(username=username)\n scope = Key.VIS_PUB # Default to lowest permission scope.\n if token and user.is_friends(req_user):\n scope = Key.VIS_PRIV\n elif user == req_user:\n scope = Key.VIS_SELF\n else:\n scope = Key.VIS_PUB\n\n user_keys = user.scoped_keys(scope)\n\n resp = routing.base.generate_base_response()\n resp[\"keys\"] = []\n resp[\"owner\"] = {\n \"username\": user.username,\n \"scope\": scope,\n }\n\n for key in user_keys:\n resp[\"keys\"].append({\n \"short_name\": key.short_name,\n \"key\": key.content,\n \"fingerprint\": key.fingerprint(),\n \"published\": str(key.published_at),\n })\n\n return json.dumps(resp) + \"\\n\"", "def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
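A usage sketch for list_customer_secret_keys, assuming the caller lists its own keys via the user OCID in the profile; only summaries come back, never the secret itself:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# The secret value itself is only returned by the create call, never by this list
for key in identity.list_customer_secret_keys(user_id=config["user"]).data:
    print(key.id, key.display_name, key.lifecycle_state)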
Lists the dynamic groups in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__.
def list_dynamic_groups(self, compartment_id, **kwargs): resource_path = "/dynamicGroups" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_dynamic_groups got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[DynamicGroup]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[DynamicGroup]")
[ "def list(request):\n return render_to_response('rteacher/manage_groups_list.html', request, **klist(\n request=request\n ))", "def list_groups(self):\n pass", "def list_groups(request):\n return Group.objects.all()", "def list_groups():\n init_dao(env('client_id'), env('client_secret'), env('tenant_id'))\n return Response(get_all_groups(r.args.get('since'),r.args), content_type=CT)", "def list_ad_groups(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def list_groups(access_token):\n request_url = OKTA_URL + \"api/v1/groups\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def show_group_list(request):\n group_list = request.user.groups.all()\n # construct dictionary for rendering\n display_groups = {}\n for group in group_list:\n # only display the 'real' group\n group_info = group.groupinfo\n if group_info.real_flag:\n display_groups[group_info.id] = group_info.name\n return render(request, \n 'group_info/group_list_page.html',\n {'display_groups': display_groups})", "def ObjectGroups(object_id):\n rhino_object = rhutil.coercerhinoobject(object_id, True, True)\n if rhino_object.GroupCount<1: return []\n group_indices = rhino_object.GetGroupList()\n rc = [scriptcontext.doc.Groups.GroupName(index) for index in group_indices]\n return rc", "def product_group_list(obj):\n client = get_client(obj)\n\n res = client.product_group_list()\n\n print(json.dumps(res, indent=4))", "def get(self, request):\n result = dc_manager.list_subcloud_groups(request)\n return {'items': [scg.to_dict() for scg in result]}", "def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})", "def get_groups(self, obj):\n groupsForCompany = get_groups_with_perms(obj)\n return [x.id for x in groupsForCompany]", "def freshservice_agent_group_list(\n self,\n page: int = None,\n page_size: int = None,\n entity_id_value: int = None,\n ) -> Dict[str, Any]:\n params = remove_empty_elements({'page': page, 'per_page': page_size})\n\n return self._http_request(\n 'GET',\n f'api/v2/groups{get_url_suffix(entity_id_value)}',\n params=params)", "def list_template_groups(context):\n template_groups = get_oneoffixx_template_groups()\n terms = []\n for group in template_groups:\n terms.append(SimpleVocabulary.createTerm(group.get(\"id\"),\n group.get(\"id\"),\n group.get(\"localizedName\")))\n return MutableObjectVocabulary(terms)", "def list_product_groups(request):\n if login_required_if_login_only_mode(request):\n return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))\n\n return render(request, \"productdb/product_group/list-product_groups.html\", context={})", "def create_dynamic_group(data=None):\n return connector.SCIMGroup(\n displayName='display_name{0}'.format(uuid.uuid4()))", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
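A sketch for list_dynamic_groups under the same assumptions (configured ~/.oci/config profile, tenancy OCID as the compartment ID):

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

for group in identity.list_dynamic_groups(compartment_id=config["tenancy"]).data:
    print(group.name, group.matching_rule)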
Lists the Fault Domains in your tenancy. Specify the OCID of either the tenancy or another of your compartments as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__.
def list_fault_domains(self, compartment_id, availability_domain, **kwargs): resource_path = "/faultDomains" method = "GET" expected_kwargs = ["retry_strategy"] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_fault_domains got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "availabilityDomain": availability_domain } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[FaultDomain]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[FaultDomain]")
[ "def get_tenant_list(conn: dict) -> dict:\n return get(conn, PCC_TENANT + \"/list\")", "def get_availability_domains(identity_client, compartment_id):\n result = pagination.list_call_get_all_results(\n identity_client.list_availability_domains,\n compartment_id\n )\n return result.data", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def list_agencies():\n domainHandler = DomainHandler()\n return domainHandler.list_agencies()", "def scan_tenancy(config: OCIConfig):\n compartment_list(config)\n resource_list(config)\n logging.info('{}'.format(pformat(config.compartments_tree)))", "def domain_list(self) -> 'outputs.GetReposRepoDomainListResult':\n return pulumi.get(self, \"domain_list\")", "def get_domains(self):\n return self.rest_helper(\"/domains.json\")", "def domains(client):\n return client.domain.all()", "def get_all_domains(self):\n sql_string = \"select DomainAcc from PFAM\"\n dalObj = DAL(self.db_name, sql_string)\n results = dalObj.executeSelect()\n return results", "def list_domain_names():\n pass", "def Customer_listCloudDomains(apiURL):\n\n req = {\n 'cmd':'$Customer.listCloudDomains',\n 'args': None\n }\n\n reqs = json.dumps(req)\n info = makeZapiRequest(apiURL, reqs)\n\n return(info)", "def get_ad_entries(cohesity_client):\n resp = cohesity_client.active_directory.get_active_directory_entry()\n if resp:\n ad_list = list()\n for each_ad in resp:\n ad_list.append(each_ad.domain_name)\n config_dict[each_ad.domain_name] = [\n \"username\", \"password\", \"machine_accounts\"]\n exported_res_dict[\"Active directories\"] = ad_list\n return resp", "def catalog_entries(self, tenant_id):\n return [\n Entry(\n tenant_id, \"rax:dns\", \"cloudDNS\",\n [\n Endpoint(tenant_id, region, text_type(uuid4()), prefix=\"v1.0\")\n for region in self._regions\n ]\n )\n ]", "def test_list_domains(self):\n pass", "def get_tenant_list():\n tenants = Query_Objs(\"fvTenant\", \"tn\")\n tn_list = [\"\"]\n for tn in tenants:\n tn_list.append(tn.name)\n return tn_list", "def select_availability_domain(config_dict, compartment_id):\n try:\n oci_identity = oci.identity.IdentityClient(config_dict)\n oci_availability_domains = oci_identity.list_availability_domains(compartment_id)\n except oci.exceptions.ServiceError as e:\n print_g('*** AUTHORISATION ERROR ***')\n _logger.error('Authorisation error', exc_info=True)\n sys.exit(1)\n except Exception as e:\n print_g('*** ERROR *** %s' % str(e))\n _logger.error('ERROR %s', str(e), exc_info=True)\n sys.exit(1)\n for domain in oci_availability_domains.data:\n print_g('%4d %-30s %s' % (oci_availability_domains.data.index(domain), domain.name, domain.id))\n return _select_from(oci_availability_domains.data, 'Select availability domain.')", "def get_delta_domains():\n url = os.getenv('DELTAS_URL')\n if url is None:\n raise Exception('Delta report URL configuration not set!')\n\n json = requests.get(url, timeout=10).json()\n return [domain\n for (domain,)\n in json['values']\n if dnstwist.is_valid_domain(domain)]", "def print_all_domain_lists(self):\n for list in self.domainCollection:\n print(list.url_identifier)", "def get_domain_list(self):\n\t\tnow = datetime.datetime.now()\n\t\texp_from = \"%04d-%02d-%02d\" % (now.year-1, now.month, now.day)\n\t\texp_to = \"%04d-%02d-%02d\" % (now.year+15, now.month, now.day)\n\t\tdomain_list = []\n\t\tfor extension in ['.com', '.au']:\n\t\t\tresult = 
self.query('get_domains_by_expiredate', 'domain', { 'exp_from' : exp_from, 'exp_to' : exp_to, 'limit' : 100000, 'page' : 1, 'domain': extension})\n\t\t\t[ domain_list.append(x['name']) for x in result['attributes']['exp_domains'] ]\n\t\treturn domain_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
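Because fault domains are scoped to an availability domain, a sketch of list_fault_domains first resolves an AD name (profile assumptions as before):

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Fault domains are listed per availability domain, so resolve an AD name first
first_ad = identity.list_availability_domains(compartment_id=config["tenancy"]).data[0]
fds = identity.list_fault_domains(
    compartment_id=config["tenancy"], availability_domain=first_ad.name
).data
print([fd.name for fd in fds])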
Lists all the identity providers in your tenancy. You must specify the identity provider type (e.g., `SAML2` for identity providers using the SAML2.0 protocol). You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__.
def list_identity_providers(self, protocol, compartment_id, **kwargs): resource_path = "/identityProviders" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_identity_providers got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "protocol": protocol, "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[IdentityProvider]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[IdentityProvider]")
[ "def list(conn):\n try:\n return conn.get(url='/auth-providers')['providers']\n except SystemError as e:\n raise e", "def get_providers(context):\n request = context[\"request\"]\n adapter = get_adapter(request)\n providers = adapter.list_providers(request)\n return sorted(providers, key=lambda p: p.name)", "def get_auth_providers(self, organization_id: int) -> List[ApiAuthProvider]:\n pass", "def list_providers(self):\n return self.parent._request('/notifier/provider', {})", "def get_auth_providers(cls):\n return [cls.os_primary.auth_provider]", "def get_oidc_providers(self, user: Dict[str, Any] = None) -> dict:\n try:\n identity_providers = self.db.query(IdentityProviders).all()\n id_providers = {'providers': IdentityProviderSchema(many=True).dump(identity_providers)}\n # update scopes from string to list\n for item in id_providers['providers']:\n item['scopes'] = item['scopes'].split(\",\")\n LOGGER.info(\"Return available identity providers.\")\n return {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": id_providers\n }\n except Exception as exp:\n return ServiceException(service_name, 500, str(exp)).to_dict()", "def get_all_tenants(context):\n return context.session.query(db_models.AristaProvisionedProjects)", "def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result", "def all(self, from_page=None):\n params = {}\n\n if from_page is not None:\n params[\"from\"] = from_page\n\n providers = self.client.get_paged(\"providers\", params=params)\n results = []\n for page in providers:\n results.extend([self._to_provider(i) for i in page[\"results\"]])\n return results", "def get_tenants():\n tenant_list = []\n try:\n resp_tenants = cohesity_client.tenant.get_tenants()\n except APIException as ex:\n print(\"Unable to get tenant list: %s\" % ex.context.response.raw_body)\n raise SystemExit\n for tenant in resp_tenants:\n tenant_list.append(tenant.tenant_id)\n print(\"Adding Tenant: %s\" % tenant.name)\n return tenant_list", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n 
return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def scan_tenancy(config: OCIConfig):\n compartment_list(config)\n resource_list(config)\n logging.info('{}'.format(pformat(config.compartments_tree)))", "def registered_providers():\n return list(_DEFAULT_PROVIDER.providers)", "def tenancies(self) -> Iterable[dto.Tenancy]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def network_service_providers(self):\n path = '/v2.0/service-providers'\n res = self.network.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack network service providers: %s' % \n truncate(res))\n return res[0]['service_providers']", "def get_endpoints(identity_provider):\n if identity_provider.auth_query_params is not None:\n auth_query_params = identity_provider.auth_query_params\n else:\n auth_query_params = {}\n\n if identity_provider.config_tag.lower() == OAUTH2_CONFIG_TYPE:\n auth_endpoint = identity_provider.oauth2.auth_endpoint\n token_endpoint = identity_provider.oauth2.token_endpoint\n auth_query_params.update(identity_provider.oauth2.auth_query_params)\n if identity_provider.config_tag.lower() == OIDC_CONFIG_TYPE:\n auth_endpoint = identity_provider.oidc.discovery_endpoint\n token_endpoint = identity_provider.oidc.auth_endpoint\n auth_query_params.update(identity_provider.oidc.auth_query_params)\n return [auth_endpoint, token_endpoint, auth_query_params]", "def list_identity_policies(Identity=None):\n pass", "def get_tenant_list(conn: dict) -> dict:\n return get(conn, PCC_TENANT + \"/list\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
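A sketch of list_identity_providers; the protocol argument is the required identity provider type, with "SAML2" being the documented value, and the profile assumption is the same as above:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

for idp in identity.list_identity_providers(
    protocol="SAML2", compartment_id=config["tenancy"]
).data:
    print(idp.name, idp.product_type)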
Lists the group mappings for the specified identity provider.
def list_idp_group_mappings(self, identity_provider_id, **kwargs): resource_path = "/identityProviders/{identityProviderId}/groupMappings" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_idp_group_mappings got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "identityProviderId": identity_provider_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) query_params = { "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, query_params=query_params, header_params=header_params, response_type="list[IdpGroupMapping]") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, query_params=query_params, header_params=header_params, response_type="list[IdpGroupMapping]")
[ "def list_groups(self):\n pass", "def get_identity_groups(self):\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.identity.identitygroup.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/identitygroup'.format(self.url_base))\n\n\t\tif resp.status_code == 200:\n\t\t\tresult['success'] = True\n\t\t\t###\n\t\t\tx = ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']\n\t\t\tprint (\"x\", len(x))\n\t\t\tprint (x[0])\n\t\t\tfor element in x[0]:\n\t\t\t\tprint (element,x[0][element])\n\t\t\t###\n\t\t\tresult['response'] = [(i['@name'], i['@id'], i['@description'],i['link']['@href'])\n\t\t\t\t\t\t\t\t for i in ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def getGroupsForPrincipal(principalid):", "def list(request):\n return render_to_response('rteacher/manage_groups_list.html', request, **klist(\n request=request\n ))", "def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})", "def list_groups(self, **params):\n url = 'groups'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def mappings(self, group_id):\n hashmap = db_api.get_instance()\n mapping_list = []\n mappings_uuid_list = hashmap.list_mappings(group_uuid=group_id)\n for mapping_uuid in mappings_uuid_list:\n mapping_db = hashmap.get_mapping(uuid=mapping_uuid)\n mapping_list.append(mapping_models.Mapping(\n **mapping_db.export_model()))\n res = mapping_models.MappingCollection(mappings=mapping_list)\n return res", "def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]", "def list_gateway_groups(NextToken=None, MaxResults=None):\n pass", "def list_groups():\n init_dao(env('client_id'), env('client_secret'), env('tenant_id'))\n return Response(get_all_groups(r.args.get('since'),r.args), content_type=CT)", "def list_all_groups():\n title = \"Group list\"\n roots = Group.query.filter_by(parent_id=None).all()\n return render_template('groups/group_list.html', title=title, user=current_user,\n roots=roots, node=None)", "def get_all_groups(server_db):\r\n cursor = server_db.cursor()\r\n cursor.execute('''SELECT group_name, group_id FROM groups''')\r\n return cursor.fetchall()", "def list_groups(request):\n return Group.objects.all()", "def list_groups(self):\n return self._get(\"cloudConnectorGroups\").list", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def describe_db_security_groups(DBSecurityGroupName=None, Filters=None, MaxRecords=None, Marker=None):\n pass", "def show_group_list(request):\n group_list = request.user.groups.all()\n # construct dictionary for rendering\n display_groups = {}\n for group in group_list:\n # only display the 'real' group\n group_info = group.groupinfo\n if group_info.real_flag:\n display_groups[group_info.id] = group_info.name\n return render(request, \n 'group_info/group_list_page.html',\n {'display_groups': display_groups})", "def 
group_ids(self):\n return self._get('groups')", "def list_groups(access_token):\n request_url = OKTA_URL + \"api/v1/groups\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the MFA TOTP devices for the specified user. The returned object contains the device's OCID, but not the seed. The seed is returned only upon creation or when the IAM service regenerates the MFA seed for the device.
def list_mfa_totp_devices(self, user_id, **kwargs): resource_path = "/users/{userId}/mfaTotpDevices" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit", "sort_by", "sort_order" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_mfa_totp_devices got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) if 'sort_by' in kwargs: sort_by_allowed_values = ["TIMECREATED", "NAME"] if kwargs['sort_by'] not in sort_by_allowed_values: raise ValueError( "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values) ) if 'sort_order' in kwargs: sort_order_allowed_values = ["ASC", "DESC"] if kwargs['sort_order'] not in sort_order_allowed_values: raise ValueError( "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values) ) query_params = { "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing), "sortBy": kwargs.get("sort_by", missing), "sortOrder": kwargs.get("sort_order", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, query_params=query_params, header_params=header_params, response_type="list[MfaTotpDeviceSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, query_params=query_params, header_params=header_params, response_type="list[MfaTotpDeviceSummary]")
[ "def list_user_devices(self, request):\n self.check_xsrf_token(self.request_state)\n user = user_lib.get_user_email()\n guest_permitted = config_model.Config.get('allow_guest_mode')\n device_message_list = []\n for device in device_model.Device.list_by_user(user):\n device_message_list.append(\n api_utils.build_device_message_from_model(device, guest_permitted))\n return device_messages.ListUserDeviceResponse(devices=device_message_list)", "def retrieve_user_devices(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n devices = self.database.retrieve_user_devices(user_id)\n if devices is not None:\n devices = list(set(devices)) # De-duplicate\n return devices", "def create_mfa_totp_device(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")", "def get_all_mfa_devices(self, user_name, marker=None, max_items=None):\r\n params = {'UserName' : user_name}\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n return self.get_response('ListMFADevices',\r\n params, list_marker='MFADevices')", "def devices_for_user(self, user, confirmed=None):\n devices = self.model.objects.filter(user=user)\n if confirmed is not None:\n devices = devices.filter(confirmed=bool(confirmed))\n\n return devices", "def get_user_devices(self, userID):\n # https://api.relayr.io/users/%s/devices\n url = '{0}/users/{1}/devices'.format(self.host, userID)\n _, data = self.perform_request('GET', url, headers=self.headers)\n return data", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def test_list_user_devices(self) -> None:\n # Request all devices of \"other user\"\n channel = self.make_request(\n \"GET\",\n f\"/_synapse/admin/v2/users/{self.other_user_id}/devices\",\n 
access_token=self.admin_user_token,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n # Double-check we got the single device expected\n user_devices = channel.json_body[\"devices\"]\n self.assertEqual(len(user_devices), 1)\n self.assertEqual(channel.json_body[\"total\"], 1)\n\n # Check that all the attributes of the device reported are as expected.\n self._validate_attributes_of_device_response(user_devices[0])\n\n # Request just a single device for \"other user\" by its ID\n channel = self.make_request(\n \"GET\",\n f\"/_synapse/admin/v2/users/{self.other_user_id}/devices/\"\n f\"{self.other_user_device_id}\",\n access_token=self.admin_user_token,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n # Check that all the attributes of the device reported are as expected.\n self._validate_attributes_of_device_response(channel.json_body)", "def get_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDeviceSummary\")", "def get_a_users_available_devices(\n self) -> Union[List[Optional[dict]], ErrorObject]:\n\n url = f'https://api.spotify.com/v1/me/player/devices'\n query_params = {}\n json_body = {}\n response, error = self._get(url, query_params, json_body)\n if error:\n return ErrorObject(response)\n return self._convert_array_to_list(response, dict)", "def get_user_devices_filtered(self, userID, meaning):\n # https://api.relayr.io/users/%s/devices?meaning=%s\n url = '{0}/users/{1}/devices?meaning={2}'.format(self.host, userID, meaning)\n _, data = self.perform_request('GET', url, headers=self.headers)\n return data", "def user_phones(self, user):\n result = self.lastuser.call_resource(\n 'phone',\n all=1,\n _token=user.lastuser_token,\n _token_type=user.lastuser_token_type,\n )\n\n if result.get('status') == 'ok':\n return result['result']['all']\n else:\n return []", "def get_user_devices_task(self):\n\n from core.models import UserDevices\n from core.utils import get_user_devices, get_icloud_api_object\n\n api = get_icloud_api_object()\n get_user_data = get_user_devices(api)\n\n user_devices = [x for x in get_user_data if x]\n\n try:\n for user_device in user_devices:\n 
UserDevices.objects.get_or_create(\n device_name=user_device\n )\n\n except Exception as exc:\n logging.info('[Get user devices task] Error --> {exc}'.format(exc=exc))\n\n ExceptionStorage.objects.create(\n task_type=1,\n error_message=exc,\n timestamp=now()\n )\n\n self.retry(\n countdown=backoff(self.request.retries),\n exc=exc\n )", "def GetOTP(user):\r\n return _ComputeOTP(_GetUserSecret(user),\r\n long(time.time() / _GRANULARITY))", "def GetOTP(user):\n return _ComputeOTP(_GetUserSecret(user),\n long(time.time() / _GRANULARITY))", "def get_user_access_tokens(request, user):\n manager = internal_keystoneclient(request).oauth2.access_tokens\n\n return manager.list_for_user(user=user)", "def delete_all_nlp_devices_of_type_for_user(user, authtoken, system_authtoken):\n kvstore = KvStore(constants.REGISTERED_DEVICES_COLLECTION_NAME, authtoken, owner=user)\n r, devices = kvstore.get_all_items()\n devices = json.loads(devices)\n kvstore.delete_items_by_query({APP_ID_LABEL: NLP_APP_ID})\n\n for device in devices:\n if APP_ID_LABEL in device and device[APP_ID_LABEL] == NLP_APP_ID:\n delete_device_from_spacebridge(device['device_id'], system_authtoken)", "def GetDevices(self, device_type=None, filter_provisioned=None):\n devices = self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices')\n if device_type:\n devices = [ device for device in devices if device.get('deviceType') in device_type]\n\n if filter_provisioned is not None:\n if filter_provisioned:\n devices = [ device for device in devices if device.get(\"state\") == 'provisioned']\n else:\n devices = [ device for device in devices if device.get(\"state\") != 'provisioned']\n\n return devices", "def get_user_devices_adapter(json_response):\n\n if 'devices' in json_response:\n ret = {\"result\": []}\n for device in json_response['devices']:\n ret[\"result\"].append(\n {\"name\": device[\"name\"],\n \"type\": device[\"type\"],\n \"id\": device[\"id\"],\n \"is_active\": device[\"is_active\"]})\n return ret\n return json_response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the network sources in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__.
def list_network_sources(self, compartment_id, **kwargs): resource_path = "/networkSources" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_network_sources got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[NetworkSourcesSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[NetworkSourcesSummary]")
[ "def get_connected_sources(self):\n body = {\"workspaceId\": AIRBYTE_WORKSPACE_ID}\n url = f\"{self.base_url}/api/v1/sources/list\"\n response = requests.post(url, json=body)\n try:\n response_json = response.json()['sources']\n except Exception as e:\n logging.exception(f\"Error getting sources for subscription_id: {self.subscription_id}\\nError {e}\")\n response_json = []\n return response_json", "def sources(self, **kwargs):\n kwargs['source'] = True\n kwargs.pop('s', None )\n kwargs['destination'] = False\n kwargs.pop('d', None )\n return general.listConnections(self, **kwargs)", "def network_list(request):\r\n\r\n organization = request.user.organization\r\n network_list = Organization.get_org_networks(organization)\r\n\r\n return render(request, 'editorial/networklist.html', {'network_list': network_list})", "def listsources():\r\n main_url = \" https://newsapi.org/v2/sources?apiKey=89c73c0b1b0f42639844b29ad695ef0b\"\r\n\r\n # fetching data in json format\r\n open_source = requests.get(main_url).json()\r\n\r\n # getting all articles in a string sources\r\n source = open_source[\"sources\"]\r\n\r\n # empty list which will\r\n # contain all trending news sources\r\n results = []\r\n\r\n for s in source:\r\n results.append(s[\"id\"])\r\n\r\n for i in results[0:4]:\r\n print(i)", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources", "def list_sources(self):\n return list(self.sources.keys())", "def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)", "def network_list(ctx):\n networks = ctx.obj['CLIENT'].network.list()\n click.echo('Networks: %s' % [network.name for network in networks])", "def network_list_for_tenant(request, tenant_id, include_external=False,\n include_pre_auto_allocate=False, page_data=None,\n **params):\n\n # Pagination is implemented consistently with nova and cinder views,\n # which means it is a bit hacky:\n # - it requests X units but displays X-1 units\n # - it ignores the marker metadata from the API response and uses its own\n # Here we have extra hacks on top of that, because we have to merge the\n # results of 3 different queries, and decide which one of them we are\n # actually paginating.\n # The 3 queries consist of:\n # 1. Shared=True networks\n # 2. Project non-shared networks\n # 3. External non-shared non-project networks\n # The main reason behind that order is to maintain the current behavior\n # for how external networks are retrieved and displayed.\n # The include_external assumption of whether external networks should be\n # displayed is \"overridden\" whenever the external network is shared or is\n # the tenant's. 
Therefore it refers to only non-shared non-tenant external\n # networks.\n # To accomplish pagination, we check the type of network the provided\n # marker is, to determine which query we have last run and whether we\n # need to paginate it.\n\n LOG.debug(\"network_list_for_tenant(): tenant_id=%(tenant_id)s, \"\n \"params=%(params)s, page_data=%(page_data)s\", {\n 'tenant_id': tenant_id,\n 'params': params,\n 'page_data': page_data,\n })\n\n page_data, marker_net = _configure_pagination(\n request, params, page_data, tenant_id=tenant_id)\n\n query_kwargs = {\n 'request': request,\n 'include_external': include_external,\n 'tenant_id': tenant_id,\n 'page_data': page_data,\n **params,\n }\n\n return _perform_query(\n _query_nets_for_tenant, query_kwargs, marker_net,\n include_pre_auto_allocate)", "async def get_sources(sources):\n url = URL + 'sources'\n params = {\"language\": 'en'}\n \n # AIOHTTP session start\n session = aiohttp.ClientSession()\n async with aiohttp.ClientSession() as session:\n async with session.get(url, ssl=False, params=params) as resp:\n data = await resp.json()\n print(\"Sources found\")\n sources.extend([src['id'].strip() for src in data['sources']])", "def get_source_urls(self) -> List[str]:\n return [self.account.SourceLink]", "def scan_tenancy(config: OCIConfig):\n compartment_list(config)\n resource_list(config)\n logging.info('{}'.format(pformat(config.compartments_tree)))", "def get_all_sources():\n sources_dict = get_sources_dict_db()\n return jsonify(sources_dict)", "def networks_objects(self):\n return self.__networks_objects_list", "def get_traffic_sources():\n\n session = requests.Session()\n result = []\n\n _logger.info(\"Start getting traffic sources from tracker...\")\n\n all_traffic_sources = requests_manager.get(\n session,\n settings.TRACKER_URL,\n params={\"page\": \"Traffic_Sources\", \"api_key\": settings.BINOM_API_KEY, \"status\": \"all\"},\n )\n\n if not isinstance(all_traffic_sources, requests.Response):\n _logger.error(f\"Network error occurred while trying to get traffic_sources from tracker: {all_traffic_sources}\")\n return []\n\n try:\n all_traffic_sources_number = len(all_traffic_sources.json())\n except json.JSONDecodeError as decode_error:\n _logger.error(\n f\"Can't decode response from tracker (traffic sources getting): \"\n f\"{decode_error.doc}\"\n )\n return []\n\n for user in User.objects.all():\n user_traffic_sources = requests_manager.get(\n session,\n settings.TRACKER_URL,\n params={\n \"page\": \"Traffic_Sources\",\n \"api_key\": settings.BINOM_API_KEY,\n \"user_group\": user.id,\n \"status\": \"all\",\n },\n )\n\n if not isinstance(user_traffic_sources, requests.Response):\n _logger.error(\n f\"Network error occurred while trying to get traffic sources from tracker: {user_traffic_sources}\")\n continue\n\n try:\n user_traffic_sources_json = user_traffic_sources.json()\n except json.JSONDecodeError as decode_error:\n _logger.error(\n f\"Can't decode response from tracker (traffic sources getting): \"\n f\"{decode_error.doc}\"\n )\n return []\n\n if user_traffic_sources_json and len(user_traffic_sources_json) != all_traffic_sources_number:\n try:\n result += [\n TrafficSource(\n id=int(traffic_source[\"id\"]),\n name=traffic_source[\"name\"],\n campaigns=int(traffic_source[\"camps\"]),\n tokens=1 if int(traffic_source[\"tokens\"]) else 0,\n user=user,\n )\n for traffic_source in user_traffic_sources_json\n ]\n except KeyError:\n _logger.error(\n f\"Can't parse response from tracker (traffic sources getting): 
{user_traffic_sources_json}\")\n return []\n\n _logger.info(\"Traffic sources were successfully get.\")\n return result", "def show_networks():\n return get_networks()", "def get_direct_sharing_sources():\n sources = [(project.id_label, project.name)\n for project in (DataRequestProject.objects\n .filter(approved=True)\n .exclude(returned_data_description=''))]\n\n return sorted(sources, key=lambda x: x[1].lower())", "def network_stories(request):\r\n\r\n org_id = request.user.organization_id\r\n organization = get_object_or_404(Organization, id=org_id)\r\n\r\n networks = Organization.get_org_networks(organization)\r\n\r\n shared_networkstories = []\r\n for network in networks:\r\n stories = Network.get_network_shared_stories(network)\r\n shared_networkstories.extend(stories)\r\n\r\n networkstories = set(shared_networkstories)\r\n\r\n return render(request, 'editorial/networkstories.html', {\r\n 'networkstories': networkstories,\r\n })" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the policies in the specified compartment (either the tenancy or another of your compartments). See `Where to Get the Tenancy's OCID and User's OCID`__. To determine which policies apply to a particular group or compartment, you must view the individual statements inside all your policies. There isn't a way to automatically obtain that information via the API.
def list_policies(self, compartment_id, **kwargs): resource_path = "/policies" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_policies got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[Policy]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[Policy]")
[ "def list_policies(self):\n client = self.connect(VAULT_TOKEN)\n return client.list_policies()", "def get_policies():\n data = connect('GET', '/policies/')\n return dict((p['name'], p['template_uuid']) for p in data['policies'])", "def list_policies(policystore_url, verbose):\n\n if verbose:\n logging.info('Listing policies')\n\n list_url = policystore_url + POLICYSTORE_PREFIX + 'ListEntitlementPolicies'\n\n r = requests.post(list_url, headers=headers(), json={})\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n sys.exit('Failed to list policies')\n\n logging.info('SUCCESS: Listed policies')\n\n resp = r.json()\n\n if verbose:\n logging.info('Policies retrieved')\n pprint.pprint(resp)\n\n return resp", "def list_policies(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"escalation_policies\",\n \"id\",\n __salt__[\"config.option\"](profile),\n api_key,\n opts=__opts__,\n )", "def list_policies(self):\n return self.con.list_policies(\n Scope='Local'\n )", "def list_policies() -> List:\n return [\n policies_v0.__name__,\n policies_v1.__name__,\n policies_v2.__name__,\n policies_v3.__name__,\n ]", "def getPolicies(self):\n return self.api_get_request(self.NINJA_API_POLICIES)", "def policies(self):\n return self._data.get('policies')", "def list_auth_policies(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.AUTH_POLICIES_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )", "def getPolicies(self):\n if self.login(): \n req = '%s/policies'% self.baseAPI\n resp = requests.get(req, headers=self.headers, verify=False)\n if resp.status_code == 200: \n jresp = json.loads(resp.content.decode('utf-8'))\n self.policies = jresp[u'policies']\n self.policiesTimeStamp = dt.datetime.now()\n return self.policies \n else: \n return False \n else: \n return False", "def policies(self, request):\n policies = OtterPolicies(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return policies.app.resource()", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def policies(self):\n return self._policies", "def rbac_policy_list(request, **kwargs):\n policies = neutronclient(request).list_rbac_policies(\n **kwargs).get('rbac_policies')\n return [RBACPolicy(p) for p in policies]", "def policy_list(request, **kwargs):\n policies = neutronclient(request).list_qos_policies(\n **kwargs).get('policies')\n return [QoSPolicy(p) for p in policies]", "def list_identity_policies(Identity=None):\n pass", "def list_workload_policies(self, params=None):\n uri = 'proj/list_workload_policies'\n if params:\n uri += '?%s' % urllib.urlencode(params)\n \n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['workload_policies'])", "def get_all_policies(self, full_information=True):\n final_result = list()\n\n for result in self.execute(self.client.list_policies, \"Policies\"):\n pol = IamPolicy(result)\n if full_information:\n self.update_policy_statements(pol)\n final_result.append(pol)\n return final_result", "def policies():\n if request.method == 'GET':\n policies = json.loads(redis.get('policies').decode('utf8'))\n return jsonify(policies=policies)\n else:\n data = request.get_json()\n 
send_policy(data['name'], data['args']);\n return jsonify(success=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the region subscriptions for the specified tenancy.
def list_region_subscriptions(self, tenancy_id, **kwargs): resource_path = "/tenancies/{tenancyId}/regionSubscriptions" method = "GET" expected_kwargs = ["retry_strategy"] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_region_subscriptions got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "tenancyId": tenancy_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="list[RegionSubscription]") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="list[RegionSubscription]")
[ "def get_regions(conf: OCIConfig):\n\n global identity_client\n # loop over the full list of regions as we don't know in advance what are the subscribed regions\n for r in REGIONS:\n conf.workon_region = r\n identity_client = oci.identity.IdentityClient(conf.config)\n try:\n rs = identity_client.list_region_subscriptions(conf.tenancy)\n conf.region_subscriptions = rs.data\n break\n except ServiceError as se:\n continue\n logging.info('Home region: {}'.format(conf.home_region))\n logging.info('Regions: {}'.format(conf.region_subscriptions))", "def list_subs(self, account_id):\n uri = f\"/v1/svc-subscription/subscriptions?catalogId=c-aaxBJkfg8u&account_id={account_id}&service_type=adns\"\n return self._make_request(uri=uri)", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def list_subscriptions(profile=None):\n if profile is None:\n profile = subscription_profile()\n cred, _, _ = profile.get_login_credentials()\n sub_client = SubscriptionClient(cred)\n return [\n {\"Index\": i, \"Name\": sub.display_name, \"id\": sub.subscription_id}\n for i, sub in enumerate(sub_client.subscriptions.list())\n ]", "def list_regional_by_subscription(\n self, location, filter=None, top=None, custom_headers=None, raw=False, **operation_config):\n def prepare_request(next_link=None):\n if not next_link:\n # Construct URL\n url = self.list_regional_by_subscription.metadata['url']\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self.config.subscription_id\", self.config.subscription_id, 'str'),\n 'location': self._serialize.url(\"location\", location, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n if filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"filter\", filter, 'str')\n if top is not None:\n query_parameters['$top'] = self._serialize.query(\"top\", top, 'int')\n\n else:\n url = next_link\n query_parameters = {}\n\n # Construct headers\n header_parameters = {}\n header_parameters['Accept'] = 'application/json'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def internal_paging(next_link=None):\n request = prepare_request(next_link)\n\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200]:\n exp = CloudError(response)\n exp.request_id = response.headers.get('x-ms-request-id')\n raise exp\n\n return response\n\n # Deserialize response\n header_dict = None\n if raw:\n header_dict = {}\n deserialized = models.EventSubscriptionPaged(internal_paging, self._deserialize.dependencies, header_dict)\n\n return deserialized", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def subscriptions(self):\n return self.properties.get('subscriptions',\n EntityCollection(self.context, Subscription,\n ResourcePath(\"subscriptions\", self.resource_path)))", "def 
test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def get_subscriptions(self):\n return self.subscriptions.all()", "def list_subscriptions_in_project(project_id):\n # [START pubsub_list_subscriptions]\n from google.cloud import pubsub_v1\n\n # TODO(developer)\n # project_id = \"your-project-id\"\n\n subscriber = pubsub_v1.SubscriberClient()\n project_path = f\"projects/{project_id}\"\n\n # Wrap the subscriber in a 'with' block to automatically call close() to\n # close the underlying gRPC channel when done.\n with subscriber:\n for subscription in subscriber.list_subscriptions(\n request={\"project\": project_path}\n ):\n print(subscription.name)\n # [END pubsub_list_subscriptions]", "def get(self, **kwargs):\n subscriptions = Subscription.get_subscriptions(**kwargs)\n result = SubscriptionSchema().dump(subscriptions, many=True)\n return jsonify(result.data)", "def get_subscriptions(self):\n \n r = self.fitbit_service.get('http://api.fitbit.com/1/user/-/apiSubscriptions.json', header_auth=True)\n if r.status_code == 200:\n subs = r.json()['apiSubscriptions']\n return [s for s in subs if s['subscriberId'] == 'm-distance']", "def get_all_subscriptions(cls, **kwargs):\n return Subscription.query.filter(**kwargs).all()", "def scan_tenancy(config: OCIConfig):\n compartment_list(config)\n resource_list(config)\n logging.info('{}'.format(pformat(config.compartments_tree)))", "def list(self):\n return self._engine.exec(\"subscription-manager list\")", "def region_list(**kwargs):\n page = kwargs[\"page\"]\n per_page = 1000\n\n pagination = (\n AADFByDirection.query.with_entities(\n AADFByDirection.region_id, AADFByDirection.region_name\n )\n .group_by(AADFByDirection.region_id, AADFByDirection.region_name)\n .order_by(AADFByDirection.region_id)\n .paginate(page, per_page, False)\n )\n all_regions = list_region_schema.dump(pagination.items)\n\n return generate_response(all_regions, pagination)", "def list_subscriptions_async(\n future_session: \"FuturesSession\",\n connection,\n project_id,\n fields=None,\n offset=0,\n limit=-1,\n):\n params = {'offset': offset, 'limit': limit, 'fields': fields}\n url = f'{connection.base_url}/api/subscriptions'\n headers = {'X-MSTR-ProjectID': project_id}\n\n return future_session.get(url=url, headers=headers, params=params)", "def _get_cloudwatch_subscriptions(self):\n return self._get_subscriptions(self.cloudwatch_arn)", "def get_tenant_list(conn: dict) -> dict:\n return get(conn, PCC_TENANT + \"/list\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the SMTP credentials for the specified user. The returned object contains the credential's OCID, the SMTP user name but not the SMTP password. The SMTP password is returned only upon creation.
def list_smtp_credentials(self, user_id, **kwargs): resource_path = "/users/{userId}/smtpCredentials" method = "GET" expected_kwargs = ["retry_strategy"] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_smtp_credentials got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="list[SmtpCredentialSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="list[SmtpCredentialSummary]")
[ "def list_credentials(user):\n return Credentials.list_credentials(user)", "def credentials():\n user_name = request.args.get('user_name') # type: str\n if user_name is None or len(user_name) <= 0:\n raise BadRequest('Missing user_name parameter.')\n # TODO return fake credentials if user does not exist\n creds = user_controller.list_credentials(user_name)\n return jsonify([str(base64.b64encode(cred), 'ascii') for cred in creds])", "def list_credentials(self, **_params):\r\n return self.get(self.credentials_path, params=_params)", "def webauthn_credentials(user):\n return [AttestedCredentialData.create(**cbor.decode(cred.credential_data)) for cred in user.webauthn_credentials]", "def get_user_cred(self):\n if Config.eap_outer == 'PEAP' or Config.eap_outer == 'TTLS':\n self.__get_credentials_from_config()", "def create_smtp_credential(self, create_smtp_credential_details, user_id, **kwargs):\n resource_path = \"/users/{userId}/smtpCredentials\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_smtp_credential got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_smtp_credential_details,\n response_type=\"SmtpCredential\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_smtp_credential_details,\n response_type=\"SmtpCredential\")", "def show_credentials(cls):\n return cls.credential_list", "def find_credentials(user=None, scopes=None, admin=None):\n return UserCredentials.find(user, scopes, admin)", "def display_credentials(cls):\n return cls.credentials_list", "def user_credentials(self):\r\n credentials = {}\r\n if EMAIL_AUTHENTICATION:\r\n credentials[\"email\"] = self.cleaned_data[\"email\"]\r\n else:\r\n credentials[\"username\"] = self.cleaned_data[\"username\"]\r\n credentials[\"password\"] = self.cleaned_data[\"password1\"]\r\n return credentials", "def user_credentials(self):\r\n credentials = {}\r\n if EMAIL_AUTHENTICATION:\r\n credentials[\"email\"] = self.cleaned_data[\"email\"]\r\n else:\r\n credentials[\"username\"] = self.cleaned_data[\"username\"]\r\n credentials[\"password\"] = self.cleaned_data[\"password\"]\r\n return credentials", "def credentials(self) -> 
Sequence['outputs.DeviceCredentialResponse']:\n return pulumi.get(self, \"credentials\")", "def get_credentials(self):\n return PlainCredentials(self.user_name, self.password)", "def getCredentials(self):\n if self.result(): # Accepted?\n username = self.username_le.text()\n password = \"\"\n if self.askpassword:\n password = self.password_le.text()\n\n return username, password\n\n raise CredentialDialogReject()", "def credentials(self):\n return self._credentials", "def get_cred(site_id, user_id):\n log = current_app.log\n db = request.db\n Cred = db.tables.Cred\n cred = Cred.query.filter_by(cred_owner=user_id,\n site_id=site_id).first_or_404()\n log.info(\"Fetched cred for user %u at site %u.\", user_id, site_id)\n return jsonify(cred.cred_value)", "def creds():\n return obj_utils.Creds()", "def _authn_passwords(self, user, username, password, credentials):\n for cred in credentials:\n if isinstance(cred, Password):\n try:\n factor = vccs_client.VCCSPasswordFactor(password, str(cred.id), str(cred.salt))\n except ValueError as exc:\n self.logger.info(\"User {!r} password factor {!s} unusable: {!r}\".format(username, cred.id, exc))\n continue\n self.logger.debug(\"Password-authenticating {!r}/{!r} with VCCS: {!r}\".format(\n username, str(cred.id), factor))\n user_id = str(user.user_id)\n try:\n if self.auth_client.authenticate(user_id, [factor]):\n self.logger.debug(\"VCCS authenticated user {!r} (user_id {!r})\".format(user, user_id))\n # Verify that the credential had been successfully used in the last 18 monthts\n # (Kantara AL2_CM_CSM#050).\n if self.credential_expired(cred):\n self.logger.info('User {!r} credential {!s} has expired'.format(user, cred.key))\n raise eduid_idp.error.Forbidden('CREDENTIAL_EXPIRED')\n self.log_authn(user, success=[cred.id], failure=[])\n return user\n except vccs_client.VCCSClientHTTPError as exc:\n if exc.http_code == 500:\n self.logger.debug(\"VCCS credential {!r} might be revoked\".format(cred.id))\n continue\n else:\n self.logger.debug(\"Unknown credential: {!s}\".format(cred))\n self.logger.debug(\"VCCS username-password authentication FAILED for user {!r}\".format(user))\n self.log_authn(user, success=[], failure=[cred.id for cred in user.passwords.to_list()])\n return None", "def get_credentials(username):\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n args.json,\n scopes=SCOPES).create_delegated(username)\n\n return credentials" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the tag defaults for tag definitions in the specified compartment.
def list_tag_defaults(self, **kwargs): resource_path = "/tagDefaults" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit", "id", "compartment_id", "tag_definition_id", "lifecycle_state" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_tag_defaults got unknown kwargs: {!r}".format(extra_kwargs)) if 'lifecycle_state' in kwargs: lifecycle_state_allowed_values = ["ACTIVE"] if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values: raise ValueError( "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values) ) query_params = { "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing), "id": kwargs.get("id", missing), "compartmentId": kwargs.get("compartment_id", missing), "tagDefinitionId": kwargs.get("tag_definition_id", missing), "lifecycleState": kwargs.get("lifecycle_state", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[TagDefaultSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[TagDefaultSummary]")
[ "def default_tags(self):\n return self._default_tags", "def initDefaults(self):\n return _libsbml.CompartmentGlyph_initDefaults(self)", "def initDefaults(self):\n return _libsbml.Compartment_initDefaults(self)", "def get_default_vpas(self, composition_space):\n\n default_vpas = {}\n for element in composition_space.get_all_elements():\n default_vpas[element.symbol] = self.all_default_vpas[\n element.symbol]\n return default_vpas", "def default_tags(self, default_tags):\n\n self._default_tags = default_tags", "def _parse_default_component_props(self):\n defaults_element = \\\n ET.fromstring(self.zipped_file.read(\"simulink/bddefaults.xml\")\n ).find(\"BlockParameterDefaults\")\n for comp in defaults_element:\n comp_type = comp.attrib[\"BlockType\"]\n self.default_component_props[comp_type] = \\\n {name: value for prop in comp\n for name, value in self._extract_properties(prop).items()}", "def get_default_config():\r\n def_dict = dict()\r\n for option in options.values():\r\n option.add_def_dict(def_dict)\r\n return def_dict", "def set_defaults(self):\n\n for component, data in self.components.items():\n for default_name, default_value in default_attrs[component].items():\n\n data[default_name] = default_value", "def defaults(self):\n plist = {preference: int(value) if isinstance(value, bool) else value\n for preference, value in self.default_values.items()}\n preference_items = [\n '{} = {};'.format(preference,\n plist[preference] if\n str(plist[preference]) else '\"\"')\n for preference in sorted(plist)]\n return '{{ {} }}'.format(' '.join(preference_items))", "def defaultItems(self):\n pass", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def default_kernel_config(defn):\n # XXX(stephentu): should the default config also include cluster_hp?\n return list(it.chain(\n default_assign_kernel_config(defn),\n default_feature_hp_kernel_config(defn)))", "def get_knob_defaults(cls):\n\n return '\\n'.join(\n [\n '# {description}\\n# {knob}={default}\\n'.format(\n description=cls.get_registered_knob(name).description,\n knob=name,\n default=cls.get_registered_knob(name).default\n ) for name in sorted(cls._register.keys())\n ]\n )", "def get_tag_list_with_attrs(cls) -> Dict[str, List[str]]:\n # TODO(sll): Cache this computation and update it on each refresh.\n # Better still, bring this into the build process so it doesn't have\n # to be manually computed each time.\n component_list = list(cls.get_all_rte_components().values())\n\n component_tags = {}\n for component_specs in component_list:\n tag_name = 'oppia-noninteractive-%s' % (\n utils.camelcase_to_hyphenated(component_specs['backend_id']))\n\n component_tags[tag_name] = [\n '%s-with-value' % ca_spec['name']\n for ca_spec in component_specs['customization_arg_specs']]\n\n return component_tags", "def gather_default_attributes(obj, defaults):\n defaults = defaults.copy()\n for attr in obj.attribute:\n if attr.is_default:\n defaulted_attr = ir_pb2.Attribute()\n defaulted_attr.CopyFrom(attr)\n defaulted_attr.is_default = False\n defaults[attr.name.text] = defaulted_attr\n return {\"defaults\": defaults}", "def _get_default_config_list(parm_base=None):\n default_config_list = []\n if parm_base is None:\n parm_base = PARM_BASE\n\n conf_dir = os.path.join(parm_base,\n METPLUS_CONFIG_DIR)\n\n # if both are found, set old base confs first so the new takes precedence\n for base_conf in OLD_BASE_CONFS + BASE_CONFS:\n conf_path = 
os.path.join(conf_dir,\n base_conf)\n if os.path.exists(conf_path):\n default_config_list.append(conf_path)\n\n if not default_config_list:\n print(f\"FATAL: No default config files found in {conf_dir}\")\n sys.exit(1)\n\n return default_config_list", "def list_tag() -> Dict[str, str]:\n _check_active_model_version()\n return _active_model_version.list_tag() # type: ignore", "def get_default_bem_variant_list(self):\n return []", "def print_defaults():\n print 'area_bounds :', default_area_bounds\n print 'area_bounds_format :', default_area_bounds_format\n print 'area_bounds_range :', default_area_bounds_range\n print 'years_bounds :', default_years_are_bounds\n print 'dates_are_bounds :', default_dates_are_bounds\n print 'init_date_str_format :', default_init_date_str_format\n print 'member_name :', default_member_name\n print 'period_name :', default_period_name\n print 'initialistion_time_name :', default_initialistion_time_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the tag namespaces in the specified compartment.
def list_tag_namespaces(self, compartment_id, **kwargs): resource_path = "/tagNamespaces" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit", "include_subcompartments", "lifecycle_state" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_tag_namespaces got unknown kwargs: {!r}".format(extra_kwargs)) if 'lifecycle_state' in kwargs: lifecycle_state_allowed_values = ["ACTIVE", "INACTIVE", "DELETING", "DELETED"] if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values: raise ValueError( "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values) ) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing), "includeSubcompartments": kwargs.get("include_subcompartments", missing), "lifecycleState": kwargs.get("lifecycle_state", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[TagNamespaceSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[TagNamespaceSummary]")
[ "async def list_namespaces(self) -> list:\n return await self.AD.state.list_namespaces()", "def namespaceList(self):\n \n pass", "def list_namespace_tags(self, namespace, **params):\n url = 'metadefs/namespaces/%s/tags' % namespace\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def namespaces(self) -> List['_types.NameSpace']:\n\t\tcount = ctypes.c_ulonglong(0)\n\t\tnameSpaceList = core.BNGetNameSpaces(self.handle, count)\n\t\tassert nameSpaceList is not None, \"core.BNGetNameSpaces returned None\"\n\t\tresult = []\n\t\ttry:\n\t\t\tfor i in range(count.value):\n\t\t\t\tresult.append(_types.NameSpace._from_core_struct(nameSpaceList[i]))\n\t\t\treturn result\n\t\tfinally:\n\t\t\tcore.BNFreeNameSpaceList(nameSpaceList, count.value)", "def _get_namespaces(self):\n\n tags = self.get_all_tags()\n namespaces = {}\n for tag in tags:\n namespace = findall('({.{1,}})', tag)\n if len(namespace) > 0:\n namespace = namespace[0]\n formatted_tag = tag.replace(namespace, '')\n try:\n namespaces[namespace].append(formatted_tag)\n except KeyError:\n namespaces[namespace] = [formatted_tag]\n if namespaces:\n self.namespace_present = True\n self.namespaces = namespaces\n # return namespaces", "def test_namespaces_list(self):\n pass", "def watchNamespacelist(self, **kwargs):\n allParams = []\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s'\"\n \" to method watchNamespacelist\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta1/watch/namespaces'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json';\n headerParams['Content-Type'] = '*/*';\n\n \n\n \n\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n \n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'V1beta1_NamespaceList')\n return responseObject", "def get_vsperf_namespace_list():\n if os.path.isdir('/tmp/namespaces'):\n return tuple(os.listdir('/tmp/namespaces'))\n else:\n return []", "def namespaces(self):\n return self._namespaces", "def test_get_namespaces(self):\n\n status, body = self.api_get_namespaces(self.current_db)\n self.validate_get_list_response(status, body, 'Namespaces')", "def get_namespaces():\n return list(StaticAsset._load_namespaces().keys())", "def getNamespaces(self):\n return _libsbml.SBMLDocument_getNamespaces(self)", "def _fetch_all_namespaces():\n response = _fetch_herd_session() \\\n .get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,\n HERD_REST_BASE_PATH, 'namespaces')) \\\n .json()\n\n namespaces = []\n for namespaceKey in response['namespaceKeys']:\n namespaces.append(namespaceKey['namespaceCode'])\n\n _print_info('Retrieved {} namespaces.'.format(len(namespaces)))\n return namespaces", "def get_namespaces(adapter):\n return adapter._get_namespaces(adapter._node) # pylint: disable=protected-access", "def test_list_template_for_all_namespaces(self):\n pass", "def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)", "def 
getNamespaces(self):\n return _libsbml.XMLToken_getNamespaces(self)", "def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the tagging work requests in compartment.
def list_tagging_work_requests(self, compartment_id, **kwargs): resource_path = "/taggingWorkRequests" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit", "resource_identifier" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_tagging_work_requests got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing), "resourceIdentifier": kwargs.get("resource_identifier", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[TaggingWorkRequestSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[TaggingWorkRequestSummary]")
[ "def GetMergeRequests(self):\n # resource=\"merge_requests?state=opened\"\n # GET /projects/:id/merge_requests\n # GET /projects/:id/merge_requests?state=opened\n # GET /merge_requests?labels=bug,reproduced\n # GET /merge_requests?milestone=release\n resource = \"merge_requests\"\n params = {\"state\": \"opened\"}\n r = self._request(resource, params)\n\n self.merge_requests = []\n for req in r.json():\n myMR = MergeReq()\n myMR.ParseRequest(req)\n #myMR.Print()\n self.merge_requests.append(myMR)", "def tags(self, request, tag_list, group):\n return tag_list", "def listTagsByNotebook(self, authenticationToken, notebookGuid):\r\n pass", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def listTagsByNotebook(self, authenticationToken, notebookGuid):\r\n self.send_listTagsByNotebook(authenticationToken, notebookGuid)\r\n return self.recv_listTagsByNotebook()", "def tag_list(request, template_name='objectapp/tag_list.html'):\n tag_list = Tag.objects.usage_for_queryset(\n Gbobject.published.all(), counts=True)\n return render_to_response(template_name, {'object_list': tag_list},\n context_instance=RequestContext(request))", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def listRequests(self):\n reqmgr = RequestManagerImpl()\n retval = []\n for request in reqmgr.listRequests(self.endpoint):\n tmpRequest = Request()\n tmpRequest.setReqmgrUrl( self.endpoint )\n tmpRequest.setWorkflowName( request['request_name'] )\n retval.append( tmpRequest )\n return retval", "def get_job_list(self):\n return self.job_list", "def api_tag_list():\n \n tags_full_list = Tag.list_all()\n return jsonify(tags_full_list)", "def listQueue(self, request):\n pass", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def job_list(self, state=''):\n jobs = []\n if state != '':\n url = self.endpoints['jobs'] + '?state=' + state\n else:\n url = self.endpoints['jobs']\n jobs = workable_depaginate(url, 'jobs')\n return jobs", "def list_all_tags(self,obs):", "def action_list_tags(self):\n if self.al_tags is None:\n class_simc = self.class_.simc\n spec_simc = self.spec.simc\n al_tags = ACTION_LIST_INFO.get(COMMON, {}).copy()\n al_tags.update(\n ACTION_LIST_INFO.get(class_simc, {}).get(COMMON, {})\n )\n al_tags.update(\n ACTION_LIST_INFO.get(class_simc, {}).get(spec_simc, {})\n )\n self.al_tags = al_tags\n return self.al_tags", "def getTagList(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.getTagList()", "def jobs(self, tags=None, tags_intersect=None):\n return list(self.all_jobs(tags=tags, tags_intersect=tags_intersect))", "def worklist():\n from wheelcms_axle.content import Content\n pending = Content.objects.filter(state=\"pending\", node__isnull=False)\n return pending", "def list_jobs(vc_ep: str):\n env = os.environ\n\n # cde cli should use the configuration from the ~/.cde/credentials\n # with the cdp access key if the CDE_USER is set in the env\n # it prompts to authenticate with 
it.\n try:\n env.pop(\"CDE_USER\")\n except KeyError:\n pass\n\n cmd = copy(list_cmd)\n cmd.append(\"--vcluster-endpoint\")\n cmd.append(vc_ep)\n print(\" \".join(cmd))\n cde_cli_parser = CdeCliJsonParser(cmd)\n job_names = cde_cli_parser.parse_job_names()\n return job_names" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the `UserGroupMembership` objects in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (see `Where to Get the Tenancy's OCID and User's OCID`__).
def list_user_group_memberships(self, compartment_id, **kwargs): resource_path = "/userGroupMemberships" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "user_id", "group_id", "page", "limit" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_user_group_memberships got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "userId": kwargs.get("user_id", missing), "groupId": kwargs.get("group_id", missing), "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[UserGroupMembership]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[UserGroupMembership]")
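A minimal usage sketch of the call above, assuming the OCI Python SDK identity client (oci.identity.IdentityClient) and a default ~/.oci/config profile; the group OCID is a placeholder and the printed attribute names are taken from the UserGroupMembership model.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# The tenancy OCID is the required compartment_id; group_id is an optional filter (placeholder value).
response = identity.list_user_group_memberships(
    compartment_id=config["tenancy"],
    group_id="ocid1.group.oc1..exampleuniqueid",
    limit=50,
)
for membership in response.data:
    print(membership.user_id, membership.group_id)

Further pages, if any, can typically be fetched by passing the opc-next-page response header value back in as the page keyword argument.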
[ "def view_group(request, group_id):\n users = models.UserProfile.all().order('email')\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n if group.users:\n users = models.UserProfile.get(group.users)\n else:\n users = []\n return utility.respond(request, 'admin/view_group', {'users': users})", "def get_queryset(self):\n user = self.request.user\n return user.group_set.all()", "def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})", "def list_users(group_id):\n group = db.session.query(Group).filter_by(id=group_id).one_or_none()\n\n # The possible users are those with role permission and not are the members of the group\n conditions = []\n for role in ['Instructor', 'Mentor', 'Teacher', 'TeachingAssistant', 'TA']:\n conditions.append(User.roles.contains(role))\n\n available_users = db.session.query(User).filter(db.or_(*conditions),\n db.not_(User.groups.any(Group.id == group.id))).all()\n\n return render_template('members/members_add.html', group=group, users=available_users)", "def get_memberships(self):\n return UnitMembership.objects.filter(unit=self).select_related(\"user\")", "def list_members(group_id):\n title = \"Member list\"\n group = Group.query.filter_by(id=group_id).one_or_none()\n\n if group is None:\n flash('There is no such group')\n return redirect(request.referrer)\n\n members = group.members\n\n # Check the permission to manage membership\n allow_manage = False\n if group.self_admin and current_user in group.members:\n allow_manage = True\n else:\n ancestors = group.path_to_root().all()\n for ancestor in ancestors[1:]:\n if current_user in ancestor.members:\n allow_manage = True\n break\n\n return render_template('members/member_list.html', title=title, user=current_user,\n group=group, members=members, allow_manage=allow_manage)", "def get_user_groups(user):\n auth_groups = user.groups.all()\n # groups = [group.profile for group in auth_group] # not working\n # todo implement better\n groups = [GroupProfile.objects.filter(group=group)[0] for group in auth_groups if GroupProfile.objects.filter(group=group).count()]\n return groups", "def getUsers(self):\n return [m.user for m in self.membership]", "def all_memberships(request):\n\n memberships = Membership.objects.all()\n\n content = {\n 'memberships': memberships,\n }\n\n return render(request, 'membership/membership.html', content)", "def list(self):\n return self._list(\"/extras/security_groups\", \"security_groups\")", "def get_membersof(self, kwargs):\n group = kwargs[\"group\"]\n verbose = kwargs.get(\"verbose\", False)\n\n results = list(self.engine.query(self.engine.GROUP_DN_FILTER(group), [\"distinguishedName\", \"objectSid\"]))\n if results:\n group_dn = results[0][\"distinguishedName\"]\n else:\n error(\"Group {group} does not exists\".format(group=group))\n\n primary_group_id = results[0][\"objectSid\"].split('-')[-1]\n results = self.engine.query(self.engine.ACCOUNTS_IN_GROUP_FILTER(primary_group_id, group_dn))\n self.display(results, verbose)", "def _get_org_members(self):\n url = f\"{BASE_URL}/orgs/{ORG}/members\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def show_group_list(request):\n group_list = request.user.groups.all()\n # construct dictionary for rendering\n display_groups = {}\n for group in group_list:\n # only display the 'real' group\n group_info = group.groupinfo\n if group_info.real_flag:\n display_groups[group_info.id] = group_info.name\n 
return render(request, \n 'group_info/group_list_page.html',\n {'display_groups': display_groups})", "def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def list_user_groups(self, token):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n groups = dataBase['userGroups']\n groupList = list()\n for group in groups:\n members = groups[group]['members']\n owners = groups[group]['owners']\n if requestUser in members or requestUser in owners:\n groupList.append(group)\n return groupList", "def fetch_their_members(our_group):\n\tgroup_id = our_group[\"groupId\"]\n\turl = f'{BASE_URL}/groups/{group_id}/members'\n\tparams = {'$select': 'userPrincipalName,id'}\n\treturn call_api(url, params)", "def get_organization_users(authn, organization_uuid):\n org = Organization.from_uuid(organization_uuid)\n authn_user = User.from_uuid(authn.sub) if authn else None\n if (authn_user and authn_user.uuid in org.reader_uuids()) or org.is_public:\n result = {\n 'users': [user.serializable() for user in org.users],\n }\n return result, 200\n raise PermissionDenied('You do not have permission to see that group.')", "def test_list_collaborations_groups(self):\n group_id = None # Change me!!\n\n r = self.client.list_collaborations_groups(group_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the work requests in a compartment.
def list_work_requests(self, compartment_id, **kwargs): resource_path = "/workRequests" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "page", "limit", "resource_identifier" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_work_requests got unknown kwargs: {!r}".format(extra_kwargs)) query_params = { "compartmentId": compartment_id, "page": kwargs.get("page", missing), "limit": kwargs.get("limit", missing), "resourceIdentifier": kwargs.get("resource_identifier", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[WorkRequestSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[WorkRequestSummary]")
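A sketch of listing work requests under the same assumptions (oci.identity.IdentityClient, config read from ~/.oci/config); the compartment OCID is a placeholder and the printed attributes are assumed from the WorkRequestSummary model.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder compartment OCID; resource_identifier could narrow the listing to a single resource.
work_requests = identity.list_work_requests(
    compartment_id="ocid1.compartment.oc1..examplecompartment",
    limit=25,
).data
for wr in work_requests:
    print(wr.id, wr.operation_type, wr.status)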
[ "def listRequests(self):\n reqmgr = RequestManagerImpl()\n retval = []\n for request in reqmgr.listRequests(self.endpoint):\n tmpRequest = Request()\n tmpRequest.setReqmgrUrl( self.endpoint )\n tmpRequest.setWorkflowName( request['request_name'] )\n retval.append( tmpRequest )\n return retval", "def worklist():\n from wheelcms_axle.content import Content\n pending = Content.objects.filter(state=\"pending\", node__isnull=False)\n return pending", "def ListSyncJobs(ctx):\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListSyncJobsResult = ctx.element.list_sync_jobs()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListSyncJobsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def list_requesters():\n from mephisto.core.local_database import LocalMephistoDB\n from tabulate import tabulate\n\n db = LocalMephistoDB()\n requesters = db.find_requesters()\n dict_requesters = [r.to_dict() for r in requesters]\n click.echo(tabulate(dict_requesters, headers=\"keys\"))", "def list():\n\treturn _jobs.all()", "def getAvailableRequests(self):\n thisStatus = \"staged\"\n self.logger.info(\"Contacting ReqMgr for workflows in status: %s\", thisStatus)\n tempResults = self.reqMgr2.getRequestByStatus(thisStatus)\n filteredResults = []\n for requests in tempResults:\n for request in viewvalues(requests):\n filteredResults.append(request)\n filteredResults.sort(key=itemgetter('RequestPriority'), reverse=True)\n filteredResults.sort(key=lambda r: r[\"Team\"])\n\n results = [(x[\"Team\"], x[\"RequestName\"], x[\"RequestWorkflow\"]) for x in filteredResults]\n\n return results", "def list_tagging_work_requests(self, compartment_id, **kwargs):\n resource_path = \"/taggingWorkRequests\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"resource_identifier\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_tagging_work_requests got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"resourceIdentifier\": kwargs.get(\"resource_identifier\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TaggingWorkRequestSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TaggingWorkRequestSummary]\")", "def queryAllRequests(self):\n logging.info(\"Querying all requests at ReqMgr instance ...\")\n r = self.reqMgrService.getRequestNames()\n print \"Found %s requests:\" % len(r)\n for req in r:\n 
print req", "def list_resources(self):\n self.workersResponded = 0\n print(\"Main thread\", threading.get_ident())\n\n for addr in self.equipment_model.get_addr_list():\n self.equipment_model.reset_index(addr)\n self.equipment_model.set_connected(addr, 2)\n\n if not self.worker_pool.is_init():\n w = self.worker_pool.create_worker(addr)\n #Signals from worker\n w.signal_connected.connect(self.slot_connected)\n w.signal_not_connected.connect(self.slot_not_connected)\n w.signal_write_success.connect(self.parent.slot_write_success)\n w.signal_query_success.connect(self.parent.slot_query_success)\n w.signal_error.connect(self.parent.slot_error)\n\n self.next_connection(addr)\n \n self.worker_pool.set_init(True)", "def get_components(self, req):\n request_name = req.request\n\n names = []\n if(request_name == \"\"):\n comps = self.rt_proxy.get_available_components() # get all\n else:\n comps = self.rt_proxy.get_available_components(request_name)\n\n for c in comps:\n names.append(str(c))\n\n resp = ListComponentsResponse(names)\n\n return resp", "def get_job_list(self):\n return self.job_list", "def list(self):\n self.background_scheduler.print_jobs()", "def retrieve_all_work_arrangements(self):", "def member_requests_list():\n context = {'user': toolkit.g.get('user') or toolkit.g.get('author')}\n id = toolkit.request.args.get('id', None)\n try:\n member_requests = toolkit.get_action(\n 'member_requests_list')(context, {})\n message = None\n if id:\n message = toolkit._(\"Member request processed successfully\")\n extra_vars = {\n 'member_requests': member_requests, 'message': message}\n return toolkit.render('request/list.html', extra_vars=extra_vars)\n except logic.NotAuthorized:\n toolkit.abort(401, not_auth_message)", "def listQueue(self, request):\n pass", "def list_jobs(self) -> list:\n return self.conn.get_jobs()", "def workloads(self):\n return self._workloads", "def get_active_requests_from_wmstats(self):\n self.logger.info('Will get list of requests which are currently putting data to DBS')\n url = '/wmstatsserver/data/filtered_requests?mask=RequestName'\n request_list = make_cmsweb_request(url).get('result', [])\n request_list = [request['RequestName'] for request in request_list]\n\n self.logger.info('Found %d requests which are currently putting data to DBS' % (len(request_list)))\n return request_list", "def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move the compartment to a different parent compartment in the same tenancy. When you move a compartment, all its contents (subcompartments and resources) are moved with it. Note that the `CompartmentId` that you specify in the path is the compartment that you want to move.
def move_compartment(self, compartment_id, move_compartment_details, **kwargs): resource_path = "/compartments/{compartmentId}/actions/moveCompartment" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match", "opc_request_id", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "move_compartment got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "compartmentId": compartment_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing), "opc-request-id": kwargs.get("opc_request_id", missing), "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=move_compartment_details) else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=move_compartment_details)
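A sketch of moving a compartment, assuming the oci.identity.models.MoveCompartmentDetails class and placeholder OCIDs; the compartment passed in the path is the one being moved, and target_compartment_id names its new parent.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.MoveCompartmentDetails(
    target_compartment_id="ocid1.compartment.oc1..exampletargetparent"
)
# Moves the compartment (and everything in it) under the target parent compartment.
response = identity.move_compartment(
    compartment_id="ocid1.compartment.oc1..examplecompartmenttomove",
    move_compartment_details=details,
)

The move is processed asynchronously; the opc-work-request-id response header can typically be tracked with the work request listing shown earlier in this file.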
[ "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def moveItem(self, oldParentPath: unicode, newParentPath: unicode, oldItemName: unicode, newItemName: unicode) -> None:\n ...", "def move_branch(self, old_address, new_parent_address):\n old = old_address\n if isinstance(old, str):\n old = old.split(self.get_sep())\n else:\n old = list(old)\n\n new_p = new_parent_address\n if isinstance(new_p, str):\n new_p = new_p.split(self.get_sep())\n else:\n new_p = list(new_p)\n old_n = self.find_node(old)\n new_n = self.find_node(new_p)\n self.remove_branch(old, stop=False)\n self.__rename_nodes(new_n.__address, old_n)\n self.add_actor(new_n.address,old_n, new_n.is_local)", "def move_resources(request, to_move, new_parent, name=None):\n old_parent = to_move.__parent__\n if old_parent is None:\n raise TypeError(\"The object you're trying to move doesn't have a parent.\")\n if name is None:\n name = to_move.__name__\n name = generate_slug(new_parent, name)\n #Notify reference guards of attempted move\n for obj in find_all_db_objects(to_move):\n request.reference_guards.moving(obj.uid)\n del old_parent[to_move.__name__]\n new_parent[name] = to_move", "def move_resource(resource, newparent):\n udcli('moveResource -resource \"%s\" -parent \"%s\"' % (resource, newparent))", "def move_to_node(self,node):\n path=self.get_path(self.current_node,node)\n self.move_to(path)", "def move_node(self, node, parent):\r\n return self._send({'name': 'moveNode', 'args': [node, parent]})", "def remove_compartment(compartment_id):\n logger = logging.getLogger(__name__)\n # Get the compartment then removes it\n compartment = Compartment.objects.get(id=compartment_id)\n try:\n with transaction.atomic():\n compartment.delete()\n except DatabaseError as remove_error:\n logger.error(remove_error)\n pass", "def moveNode(nodeID, newParentID, index=None):", "def moveToParent (self):\n \n p = self\n \n if not p: return p\n \n if p.v._parent and len(p.v._parent.t.vnodeList) == 1:\n p.v = p.v._parent\n elif p.stack:\n p.v = p.stack.pop()\n else:\n p.v = None\n return p", "def move_node(self, node_id, new_parent_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n self.detach_node(node_id=node_id, connection=connection)\n self.attach_node(node_id=node_id, new_parent_id=new_parent_id, connection=connection)", "def move(self, name: str, newparent, newname: str = None, registry=None):\n if newname is None:\n newname = name\n if registry is None:\n registry = get_current_registry()\n ob = self.remove(name, moving=newparent, registry=registry)\n newparent.add(newname, ob, moving=self, registry=registry)\n return ob", "def setCompartment(self, *args):\n return _libsbml.CompartmentReference_setCompartment(self, *args)", "def test_to_same_parent_circular(self):\n host = Host(self.test_root)\n book = Book(host)\n book.meta = {\n 'item1': {},\n 'item2': {},\n 'item3': {},\n 'item3-1': {},\n 'item3-2': {},\n 'item4': {},\n }\n book.toc = {\n 'root': [\n 'item1',\n ],\n 'item1': [\n 'item2',\n ],\n 'item2': [\n 'item3-1',\n 'item3',\n 'item3-2',\n ],\n 'item3': [\n 
'item4',\n ],\n 'item4': [\n 'item1',\n ],\n }\n\n self.assertEqual(book.move_item('item2', 1, 'item2', 3), 2)\n self.assertEqual(book.toc, {\n 'root': [\n 'item1',\n ],\n 'item1': [\n 'item2',\n ],\n 'item2': [\n 'item3-1',\n 'item3-2',\n 'item3',\n ],\n 'item3': [\n 'item4',\n ],\n 'item4': [\n 'item1',\n ],\n })", "def move_node(self, node_id, new_parent_id, connection=None):\n raise NotImplementedError", "def reparent(self, nodePath, oldParent, newParent):\n # Does the node path correspond to a DNA Object\n dnaNode = self.findDNANode(nodePath)\n if dnaNode:\n # Find old parent DNA\n oldParentDNANode = self.findDNANode(oldParent)\n # Remove DNA from old parent\n if oldParentDNANode:\n oldParentDNANode.remove(dnaNode)\n if newParent:\n # Update active parent just to be safe\n self.setActiveParent(newParent)\n # Move DNA to new parent (if active parent set)\n if self.DNAParent != None:\n self.DNAParent.add(dnaNode)\n # It is, is it a DNA_NODE (i.e. it has pos/hpr/scale)?\n # Update pose to reflect new relationship\n if DNAIsDerivedFrom(dnaNode, DNA_NODE):\n # Update DNA\n self.updatePose(dnaNode, nodePath)\n elif newParent:\n # See if this node path is a suit edge\n suitEdge, oldVisGroup = self.np2EdgeDict.get(nodePath.id(), (None, None))\n # And see if the new parent is a vis group\n newVisGroupNP, newVisGroupDNA = self.findParentVisGroup(newParent)\n if suitEdge and DNAClassEqual(newVisGroupDNA, DNA_VIS_GROUP):\n # If so, remove suit edge from old vis group and add it to the new group\n oldVisGroup.removeSuitEdge(suitEdge)\n # Update suit edge to reflect new zone ID\n suitEdge.setZoneId(newVisGroupDNA.getName())\n newVisGroupDNA.addSuitEdge(suitEdge)\n # Update np2EdgeDict to reflect changes\n self.np2EdgeDict[nodePath.id()] = [suitEdge, newVisGroupDNA]", "def _swap_with_parent(self) -> bool:\n if self.parent is None:\n return False\n if self.parent.get_chainwork() >= self.get_chainwork():\n return False\n self.print_error(\"swap\", self.forkpoint, self.parent.forkpoint) #Calvin: We should see in the logs when a swap happens\n parent_branch_size = self.parent.height() - self.forkpoint + 1\n forkpoint = self.forkpoint # type: Optional[int]\n parent = self.parent # type: Optional[Blockchain]\n child_old_id = self.get_id()\n parent_old_id = parent.get_id()\n # swap files\n # child takes parent's name\n # parent's new name will be something new (not child's old name) Calvin: This makes sense, otherwise the hash would be invalid\n self.assert_headers_file_available(self.path())\n child_old_name = self.path()\n with open(self.path(), 'rb') as f:\n my_data = f.read()\n self.assert_headers_file_available(parent.path())\n assert forkpoint > parent.forkpoint, (f\"forkpoint of parent chain ({parent.forkpoint}) \"\n f\"should be at lower height than children's ({forkpoint})\")\n with open(parent.path(), 'rb') as f:\n # Calvin: forkpoint - parent.forkpoint is technically the height of this blockchain, why not use height method?\n # Calvin: Answer: There is a main_chain, this uses the blockchain with the greatest chainwork as the main_chain\n f.seek((forkpoint - parent.forkpoint)*HEADER_SIZE) # Calvin: This excludes the forkpoint_hash, why? Technically the forkpoints have the same first header! 
Saves a few bytes of writing.\n parent_data = f.read(parent_branch_size*HEADER_SIZE)\n self.write(parent_data, 0) # Calvin: writes the parents block data into this (current child)\n parent.write(my_data, (forkpoint - parent.forkpoint)*HEADER_SIZE) # Calvin: writes the child's block data into parents file\n # swap parameters # Calvin: Swaps the childs parents to be the parent's parent and the parent's parent is now the previous child\n self.parent, parent.parent = parent.parent, self # type: Optional[Blockchain], Optional[Blockchain]\n self.forkpoint, parent.forkpoint = parent.forkpoint, self.forkpoint\n self._forkpoint_hash, parent._forkpoint_hash = parent._forkpoint_hash, hash_raw_header(bh2u(parent_data[:HEADER_SIZE])) # Swaps the forkpoint_hash values\n self._prev_hash, parent._prev_hash = parent._prev_hash, self._prev_hash\n # parent's new name\n os.replace(child_old_name, parent.path())\n self.update_size()\n parent.update_size()\n # update pointers\n blockchains.pop(child_old_id, None)\n blockchains.pop(parent_old_id, None)\n blockchains[self.get_id()] = self\n blockchains[parent.get_id()] = parent\n return True", "def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")", "def movein (self):\n new_name = self.base_sync_dir + \"/\" + self.base_name\n\n try:\n shutil.move (self.full_name, new_name)\n except Exception, e:\n print \"Failed to move an object !\", e\n sys.exit (1)\n\n logging.info ('[movein] [%s] %s => %s', self.otype, self.full_name, new_name)\n self.full_name = new_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the OAuth2 client credentials for the SCIM client associated with this identity provider.
def reset_idp_scim_client(self, identity_provider_id, **kwargs): resource_path = "/identityProviders/{identityProviderId}/actions/resetScimClient" method = "POST" expected_kwargs = ["retry_strategy"] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "reset_idp_scim_client got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "identityProviderId": identity_provider_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json" } retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="ScimClientCredentials") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="ScimClientCredentials")
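A short sketch of resetting the SCIM client credentials, with a placeholder identity provider OCID; the attribute names on the returned object are assumed from the ScimClientCredentials model.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Returns a fresh client id/secret pair for the identity provider's SCIM client (placeholder OCID).
creds = identity.reset_idp_scim_client(
    identity_provider_id="ocid1.saml2idp.oc1..exampleidentityprovider"
).data
print(creds.client_id, creds.client_secret)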
[ "def ClearCredentials(self):\r\n self._consumer_key = None\r\n self._consumer_secret = None\r\n self._access_token_key = None\r\n self._access_token_secret = None\r\n self._bearer_token = None\r\n self.__auth = None # for request upgrade\r", "def logout(self):\n self._client.clear_credentials()", "def reset(cls):\n cls.__client = None", "def reset_secret(self, save=False):\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n\n if save:\n self.save()\n return True", "def remove_client_credentials(self):\n if self._dry_run:\n return\n os.unlink(self._store_pathname)", "def revoke_oauth_credential(self):\n if self.session.token_type == auth.SERVER_TOKEN_TYPE:\n return\n\n credential = self.session.oauth2credential\n revoke_access_token(credential)", "def erase_credentials(self):\r\n if self.erase_on_connect:\r\n LOGGER.info(\"Erasing stored credential values\")\r\n self.username = None\r\n self.password = None", "def _reset(self):\r\n ClientStream._reset(self)\r\n self.available_auth_methods = None\r\n self.auth_stanza = None\r\n self.registration_callback = None", "def resetCredentials(self, request, response):\n response.unauthorized()", "def reset_credentials(self):\n credentials = {}\n with open(self.credentials_file, 'w') as fh_credentials:\n fh_credentials.write(json.dumps(credentials))", "def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')", "def unauthenticate(self):\n self.client.force_authenticate(user=None)", "def _ClearAuthentication(self):\r\n auth_file = self._AuthFilePath()\r\n if os.path.exists(auth_file):\r\n try:\r\n os.remove(auth_file)\r\n except:\r\n logging.fatal('Could not clear authorization file %s', auth_file, exc_info=True)\r\n raise ScenarioLoginError('Error clearing auth file for client %s.' 
% self.name)", "def test_single_async_resetAccessToken(self):\n self.try_function(\n 'resetAccessToken',\n 'post',\n argumentNames=['clientId', ],\n )", "def _refresh_api_client_token(self):\n if getattr(self, '_is_refresh_token', None):\n return\n\n new_token = get_gcp_access_token()\n self._existing_config.api_key['authorization'] = new_token", "def test_multi_async_resetAccessToken(self):\n self.try_async_function(\n 'resetAccessToken',\n 'post',\n argumentNames=['clientId', ],\n )", "def refresh_oauth_credential(self):\n if self.session.token_type == auth.SERVER_TOKEN_TYPE:\n return\n\n credential = self.session.oauth2credential\n if credential.is_stale():\n refresh_session = refresh_access_token(credential)\n self.session = refresh_session", "def revoke(\n client_id,\n client_secret,\n client_email=None,\n private_key=None,\n access_token=None,\n refresh_token=None,\n identity=None,\n prefix=None,\n suffix=None,\n):\n\n if client_email and private_key:\n raise ValueError(\"Two-legged OAuth does not use revokable tokens.\")\n\n credentials = oauth.Credentials.find(\n complete=True,\n interactive=False,\n identity=identity,\n client_id=client_id,\n client_secret=client_secret,\n access_token=access_token,\n refresh_token=refresh_token,\n prefix=prefix,\n suffix=suffix,\n )\n\n retval = credentials.revoke()\n return retval", "def revoke(client_id, client_secret,\n client_email=None, private_key=None,\n access_token=None, refresh_token=None,\n identity=None, prefix=None, suffix=None):\n\n if client_email and private_key:\n raise ValueError('Two-legged OAuth does not use revokable tokens.')\n \n credentials = oauth.Credentials.find(\n complete=True,\n interactive=False,\n identity=identity,\n client_id=client_id,\n client_secret=client_secret,\n access_token=access_token,\n refresh_token=refresh_token,\n prefix=prefix,\n suffix=suffix,\n )\n\n retval = credentials.revoke()\n keyring.delete(credentials.identity)\n return retval" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the specified compartment's description or name. You can't update the root compartment.
def update_compartment(self, compartment_id, update_compartment_details, **kwargs): resource_path = "/compartments/{compartmentId}" method = "PUT" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "update_compartment got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "compartmentId": compartment_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_compartment_details, response_type="Compartment") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_compartment_details, response_type="Compartment")
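A sketch of renaming a compartment and updating its description, assuming the oci.identity.models.UpdateCompartmentDetails class and a placeholder compartment OCID; the if_match value is an optional ETag guard.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.UpdateCompartmentDetails(
    name="renamed-compartment",
    description="Updated description",
)
updated = identity.update_compartment(
    compartment_id="ocid1.compartment.oc1..examplecompartment",
    update_compartment_details=details,
    if_match='"example-etag"',  # optional optimistic-concurrency check
).data
print(updated.name, updated.description)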
[ "def update_collaboration(self, name=None,\r\n description=None, config=None):\r\n data_path = \"%s/updateInfo\" % self._basepath\r\n params = {\"f\" : \"json\"}\r\n if name:\r\n params['name'] = name\r\n if description:\r\n params['description'] = description\r\n if config:\r\n params['config'] = config\r\n con = self._portal.con\r\n return con.post(path=data_path, postdata=params)", "def update_object(self, name: str) -> None:", "def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))", "def updateDescription(self, descr):\n self.description = descr", "def update_catalog(self, old_catalog_name, new_catalog_name, description):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n org = self.resource\n links = get_links(\n org, rel=RelationType.DOWN, media_type=EntityType.CATALOG.value)\n for link in links:\n if old_catalog_name == link.name:\n catalog = self.client.get_resource(link.href)\n href = catalog.get('href')\n admin_href = href.replace('/api/catalog/',\n '/api/admin/catalog/')\n admin_view_of_catalog = self.client.get_resource(admin_href)\n if new_catalog_name is not None:\n admin_view_of_catalog.set('name', new_catalog_name)\n if description is not None:\n admin_view_of_catalog['Description'] = E.Description(\n description)\n return self.client.put_resource(\n admin_href,\n admin_view_of_catalog,\n media_type=EntityType.ADMIN_CATALOG.value)\n raise Exception('Catalog not found.')", "def add_compartment(self, compartment):\n self.compartments[compartment.id] = compartment", "def update(self, job_name, param_name, value, description=None):\n if job_name in self._jobs:\n getattr(self._jobs[job_name], param_name).update(value, description)\n else:\n self.log.error(\"Invalid job name: %s\", job_name)", "def update_description_by_name(item_name, new_description):\n print('-------------------- Item - update_description_by_name')\n item = Item.query.filter_by(name=item_name).first()\n if item:\n item.set_name(new_description)\n item.set_last_update()\n db.session.merge()\n db.session.commit()\n return True\n return False", "def call_update_component(self, env_name, component_name, image_tag):\n env = self.get_environment(env_name)\n comp = env.get_component(component_name)\n comp.set_image_tag(image_tag)\n self.call_recreate_component(env_name, component_name)", "def updateNameAndDescription(self, name, desc):\n self.magneticfield.name = name\n self.magneticfield.description = desc\n\n self.magneticfield.writeFile()", "def album_update_description(self, index, description):\n\n self.albums[index]['description'] = description\n self.album_save(self.albums[index])", "def update_by_name(name):\n pass", "def request_description_update():\n global should_update_description\n should_update_description = True", "def update(self, name, new_name):\n _list = self._repo.get_list()\n for idx, discipline in enumerate(_list):\n if discipline.name == name:\n discipline.name = new_name\n break\n return name, new_name", "def test_update_context_path_comp_service_end_point_name_name_by_id(self):\n name = NameAndValue()\n response = self.client.open(\n '/restconf/config/context/path-comp-service/{uuid}/end-point/{local_id}/name/{value_name}/'.format(uuid='uuid_example', local_id='local_id_example', value_name='value_name_example'),\n method='PUT',\n data=json.dumps(name),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def updateOfferByName(self, 
name=None, body=\"\"):\n payload = {}\n \n if name:\n payload[\"name\"] = name\n \n\n # Parameter validation\n schema = RewardsValidator.updateOfferByName()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import Offer\n schema = Offer()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/rewards/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/offers/{name}/\", \"\"\"{\"required\":[{\"name\":\"name\",\"in\":\"path\",\"description\":\"The name given to the offer.\",\"required\":true,\"schema\":{\"type\":\"string\"}},{\"name\":\"company_id\",\"in\":\"path\",\"description\":\"company id\",\"required\":true,\"schema\":{\"type\":\"string\"}},{\"name\":\"application_id\",\"in\":\"path\",\"description\":\"application id\",\"required\":true,\"schema\":{\"type\":\"string\"}}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"name\":\"name\",\"in\":\"path\",\"description\":\"The name given to the offer.\",\"required\":true,\"schema\":{\"type\":\"string\"}},{\"name\":\"company_id\",\"in\":\"path\",\"description\":\"company id\",\"required\":true,\"schema\":{\"type\":\"string\"}},{\"name\":\"application_id\",\"in\":\"path\",\"description\":\"application id\",\"required\":true,\"schema\":{\"type\":\"string\"}}]}\"\"\", name=name, )\n query_string = await create_query_string(name=name, )\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"PUT\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"put\", await create_url_without_domain(f\"/service/platform/rewards/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/offers/{name}/\", name=name, ), query_string, headers, body, exclude_headers=exclude_headers), data=body)", "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "def update_address_book(AddressBookArn=None, Name=None, Description=None):\n pass", "def SetDescription(self, name, description):\n callResult = self._Call(\"SetDescription\", name, description)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the specified dynamic group.
def update_dynamic_group(self, dynamic_group_id, update_dynamic_group_details, **kwargs): resource_path = "/dynamicGroups/{dynamicGroupId}" method = "PUT" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "update_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "dynamicGroupId": dynamic_group_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_dynamic_group_details, response_type="DynamicGroup") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_dynamic_group_details, response_type="DynamicGroup")
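A sketch of updating a dynamic group's description and matching rule, assuming the oci.identity.models.UpdateDynamicGroupDetails class; the OCIDs and the matching-rule expression are illustrative placeholders.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.UpdateDynamicGroupDetails(
    description="Instances in the build compartment",
    matching_rule="instance.compartment.id = 'ocid1.compartment.oc1..examplecompartment'",
)
updated = identity.update_dynamic_group(
    dynamic_group_id="ocid1.dynamicgroup.oc1..exampledynamicgroup",
    update_dynamic_group_details=details,
).data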
[ "def update_group(self, group_name):\n self._runtime_error_if_called_during_showtime('update_group')\n self._current_update_group = group_name", "def update_group(self, group, **attrs):\n return self._update(_group.Group, group, prepend_key=False, **attrs)", "def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)", "async def update_group(\n payload: GroupIn,\n group_id: int = Path(..., gt=0),\n _=Security(get_current_access, scopes=[AccessType.admin])\n):\n return await crud.update_entry(groups, payload, group_id)", "def update_dynamic_thing_group(thingGroupName=None, thingGroupProperties=None, expectedVersion=None, indexName=None, queryString=None, queryVersion=None):\n pass", "def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")", "def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)", "def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')", "def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "def update(self, group_key, bindings):\n if self._variable in bindings:\n if group_key not in self._groups:\n self._groups[group_key] = 0\n self._groups[group_key] += 1", "def update_gateway_group(GatewayGroupArn=None, Name=None, Description=None):\n pass", "def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})", "def update_group(self, group_id, new_description):\n url = self.groups_url + \"/\" + group_id\n new_data = json.dumps({\"description\": new_description})\n\n return requests.put(url, new_data, headers=self.headers)", "def test_update_group(self):\n # Create group\n group_name = data_utils.rand_name('group')\n description = data_utils.rand_name('Description')\n group = self.client.create_group(\n name=group_name, group_type='exclusivity',\n description=description)\n\n 
self.addCleanup(self.client.delete_group, group['id'])\n\n group_id = group.get('id')\n\n new_desc = data_utils.rand_name('UpdateDescription')\n updated_group = self.client.update_group(\n group_id, new_desc)\n\n self.assertEqual(updated_group['description'], new_desc)", "def modify(self, key: int, data: Dict[str, Any]) -> APIResponse:\n return self._put(\"detail\", {\"group_pk\": key}, data)", "def test_update_group(self):\n pass", "async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = \"\"\"update contact_groups set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_group_id)\n await cur.execute(q, q_args)\n\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n await dbcon.transact(_run)", "def update_thing_group(thingGroupName=None, thingGroupProperties=None, expectedVersion=None):\n pass", "def test_update_user_group(self):\n query_string = [('name', 'marketing team'),\n ('description', 'The marketing team.')]\n response = self.client.open(\n '/api/v1/user_groups/{user_group_id}'.format(user_group_id=1),\n method='PATCH',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the specified identity provider.
def update_identity_provider(self, identity_provider_id, update_identity_provider_details, **kwargs): resource_path = "/identityProviders/{identityProviderId}" method = "PUT" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "update_identity_provider got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "identityProviderId": identity_provider_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_identity_provider_details, response_type="IdentityProvider") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_identity_provider_details, response_type="IdentityProvider")
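A sketch of updating an identity provider's description. The concrete details class is assumed to be the SAML2 variant (oci.identity.models.UpdateSaml2IdentityProviderDetails), since the update details type is protocol-specific; the identity provider OCID is a placeholder.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.UpdateSaml2IdentityProviderDetails(
    description="Corporate SAML identity provider",
)
updated = identity.update_identity_provider(
    identity_provider_id="ocid1.saml2idp.oc1..exampleidentityprovider",
    update_identity_provider_details=details,
).data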
[ "def update_resource_provider(context, provider_id, values):\n return _get_dbdriver_instance().update_resource_provider(\n context, provider_id, values)", "def update_identity_provider(module, sdk, cloud, idp):\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n attributes = {}\n\n if (description is not None) and (description != idp.description):\n attributes['description'] = description\n if (enabled is not None) and (enabled != idp.is_enabled):\n attributes['enabled'] = enabled\n if (domain_id is not None) and (domain_id != idp.domain_id):\n attributes['domain_id'] = domain_id\n if (remote_ids is not None) and (remote_ids != idp.remote_ids):\n attributes['remote_ids'] = remote_ids\n\n if not attributes:\n return False, idp\n\n if module.check_mode:\n return True, None\n\n try:\n new_idp = cloud.identity.update_identity_provider(idp, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to update identity provider: {0}'.format(str(ex)))\n return (True, new_idp)", "def update_provider(\n provider_id:UUID = Form(...),\n name:str = Form(...),\n qualification:str = Form(...),\n speciality:str = Form(...),\n phone:str = Form(...),\n department:Optional[str] = Form(\"N/A\"),\n organization:str = Form(...),\n location:Optional[str] = Form(\"N/A\"),\n address:str = Form(...),\n active:bool = Form(...)\n ):\n\n post_data = {\n \"name\": name,\n \"qualification\": qualification,\n \"speciality\": speciality,\n \"phone\": phone,\n \"department\": department,\n \"organization\": organization,\n \"location\": location,\n \"address\": address,\n \"active\": active\n }\n provider_data = open_for_reading()\n provider_data[str(provider_id)] = post_data\n open_for_writing(data=provider_data)\n return {\"msg\": \"updated\"}", "def update(self,\n provider_id,\n l3_vpn_context,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'l3_vpn_context': l3_vpn_context,\n })", "def update_conference_provider(ConferenceProviderArn=None, ConferenceProviderType=None, IPDialIn=None, PSTNDialIn=None, MeetingSetting=None):\n pass", "def update_identity_metadata(self,\n requestor_id,\n identity_id,\n metadata,\n version):\n response = requests.put(\n url=\"{base_url}{resource}/{identity_id}\".format(\n base_url=self.DELTA_URL,\n resource=self.RESOURCE_IDENTITIES,\n identity_id=identity_id),\n headers={\n \"if-match\": str(version)\n },\n json=dict(metadata=metadata),\n auth=self.signer(requestor_id))\n response.raise_for_status()", "def update_oidc_provider_config(\n self, provider_id, client_id=None, issuer=None, display_name=None,\n enabled=None, client_secret=None, id_token_response_type=None,\n code_response_type=None):\n _validate_oidc_provider_id(provider_id)\n req = {}\n if display_name is not None:\n if display_name == _user_mgt.DELETE_ATTRIBUTE:\n req['displayName'] = None\n else:\n req['displayName'] = _auth_utils.validate_string(display_name, 'display_name')\n if enabled is not None:\n req['enabled'] = _auth_utils.validate_boolean(enabled, 'enabled')\n if client_id:\n req['clientId'] = _validate_non_empty_string(client_id, 'client_id')\n if issuer:\n req['issuer'] = _validate_url(issuer, 'issuer')\n\n response_type = {}\n if id_token_response_type is False and code_response_type is False:\n raise ValueError('At least one response type must be returned.')\n if id_token_response_type is not None:\n 
response_type['idToken'] = _auth_utils.validate_boolean(\n id_token_response_type, 'id_token_response_type')\n if code_response_type is not None:\n response_type['code'] = _auth_utils.validate_boolean(\n code_response_type, 'code_response_type')\n if code_response_type:\n req['clientSecret'] = _validate_non_empty_string(client_secret, 'client_secret')\n if response_type:\n req['responseType'] = response_type\n\n if not req:\n raise ValueError('At least one parameter must be specified for update.')\n\n update_mask = _auth_utils.build_update_mask(req)\n params = 'updateMask={0}'.format(','.join(update_mask))\n url = '/oauthIdpConfigs/{0}'.format(provider_id)\n body = self._make_request('patch', url, json=req, params=params)\n return OIDCProviderConfig(body)", "def update(self,\n provider_id,\n provider_deployment_map_id,\n provider_deployment_map,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'provider_deployment_map_id': provider_deployment_map_id,\n 'provider_deployment_map': provider_deployment_map,\n })", "def update(self,identity,params=None, headers=None):\n path = self._sub_url_params('/mandates/:identity', {\n \n 'identity': identity,\n })\n \n if params is not None:\n params = {self._envelope_key(): params}\n\n response = self._perform_request('PUT', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def provider_id(self, provider_id):\n self._provider_id = provider_id", "def update(providers):\n if not providers:\n providers = settings.JELLYROLL_PROVIDERS\n else:\n providers = set(providers).intersection(set(settings.JELLYROLL_PROVIDERS))\n\n for provider in providers:\n log.debug(\"Updating from provider %r\", provider)\n try:\n mod = __import__(provider, '', '', [''])\n except ImportError, e:\n log.error(\"Couldn't import %r: %s\" % (provider, e))\n continue\n\n if not mod.enabled():\n log.info(\"Skipping %r: enabled() returned False\", provider)\n continue\n\n log.info(\"Running '%s.update()'\", provider)\n try:\n mod.update()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception, e:\n log.error(\"Failed during '%s.update()'\", provider)\n log.exception(e)\n continue\n\n log.info(\"Done with provider %r\", provider)", "def provider_id(self, provider_id):\n\n self._provider_id = provider_id", "def update(self,identity,params=None, headers=None):\n path = self._sub_url_params('/payouts/:identity', {\n \n 'identity': identity,\n })\n \n if params is not None:\n params = {self._envelope_key(): params}\n\n response = self._perform_request('PUT', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def provider(self, provider):\n\n self._provider = provider", "def update_saml_provider_config(\n self, provider_id, idp_entity_id=None, sso_url=None, x509_certificates=None,\n rp_entity_id=None, callback_url=None, display_name=None, enabled=None):\n _validate_saml_provider_id(provider_id)\n idp_config = {}\n if idp_entity_id is not None:\n idp_config['idpEntityId'] = _validate_non_empty_string(idp_entity_id, 'idp_entity_id')\n if sso_url is not None:\n idp_config['ssoUrl'] = _validate_url(sso_url, 'sso_url')\n if x509_certificates is not None:\n idp_config['idpCertificates'] = _validate_x509_certificates(x509_certificates)\n\n sp_config = {}\n if rp_entity_id is not None:\n sp_config['spEntityId'] = _validate_non_empty_string(rp_entity_id, 'rp_entity_id')\n if callback_url is not None:\n sp_config['callbackUri'] = _validate_url(callback_url, 'callback_url')\n\n req = {}\n if 
display_name is not None:\n if display_name == _user_mgt.DELETE_ATTRIBUTE:\n req['displayName'] = None\n else:\n req['displayName'] = _auth_utils.validate_string(display_name, 'display_name')\n if enabled is not None:\n req['enabled'] = _auth_utils.validate_boolean(enabled, 'enabled')\n if idp_config:\n req['idpConfig'] = idp_config\n if sp_config:\n req['spConfig'] = sp_config\n\n if not req:\n raise ValueError('At least one parameter must be specified for update.')\n\n update_mask = _auth_utils.build_update_mask(req)\n params = 'updateMask={0}'.format(','.join(update_mask))\n url = '/inboundSamlConfigs/{0}'.format(provider_id)\n body = self._make_request('patch', url, json=req, params=params)\n return SAMLProviderConfig(body)", "def saml_provider(self, saml_provider):\n\n self._saml_provider = saml_provider", "def _update_provider_context(networks):\n with setup_flask_app().app_context():\n sm = get_storage_manager()\n ctx = sm.get(models.ProviderContext, PROVIDER_CONTEXT_ID)\n old_networks = ctx.context['cloudify']['cloudify_agent']['networks']\n old_networks.update(networks)\n flag_modified(ctx, 'context')\n sm.update(ctx)", "def add_or_update_provider(self, provider_name, provider_type, endpoints, zone, provider_region,\n validate_provider_auth = True, initiate_refresh = True):\n zone_id = self.find_zone_by_name(zone or 'default')\n # check if provider with the same name already exists\n provider_id = self.find_provider_by_name(provider_name)\n if provider_id: # provider exists\n existing_config = self.get_provider_config(provider_id)\n\n # ManageIQ Euwe / CFME 5.7 API and older versions don't support certificate authority field in endpoint.\n # If it wasn't returned from existing provider configuration this means it is either unsupported or null,\n # in both cases we can remove null/empty certificate_authority from endpoints we want to update.\n self.filter_unsupported_fields_from_config(endpoints, existing_config['endpoints'], {'certificate_authority'})\n\n updates = self.required_updates(provider_id, endpoints, zone_id, provider_region, existing_config)\n\n if not updates:\n return dict(changed=self.changed,\n msg=\"Provider %s already exists\" % provider_name)\n\n old_validation_details = self.auths_validation_details(provider_id)\n operation = \"update\"\n self.update_provider(provider_id, provider_name, endpoints, zone_id, provider_region)\n roles_with_changes = set(updates[\"Added\"]) | set(updates[\"Updated\"])\n else: # provider doesn't exists, adding it to manageiq\n\n # ManageIQ Euwe / CFME 5.7 API and older versions don't support certificate authority field in endpoint.\n # filter empty fields if none on creation - No existing endpoints for new provider\n self.filter_unsupported_fields_from_config(endpoints, [{}], {'certificate_authority'})\n updates = None\n old_validation_details = {}\n operation = \"addition\"\n provider_id = self.add_new_provider(provider_name, provider_type,\n endpoints, zone_id, provider_region)\n roles_with_changes = [e['endpoint']['role'] for e in endpoints]\n\n if validate_provider_auth:\n authtypes_to_verify = []\n for e in endpoints:\n if e['endpoint']['role'] in roles_with_changes:\n authtypes_to_verify.append(e['authentication']['authtype'])\n result, details = self.verify_authenticaion_validation(provider_id, old_validation_details, authtypes_to_verify)\n else:\n result = \"Skipped Validation\"\n details = result\n\n if result == \"Invalid\":\n self.module.fail_json(msg=\"Failed to Validate provider authentication after {operation}. 
details: {details}\".format(operation=operation, details=details))\n elif result == \"Valid\" or result == \"Skipped Validation\":\n if initiate_refresh:\n self.refresh_provider(provider_id)\n message = \"Successful {operation} of {provider} provider. Authentication: {validation}. Refreshing provider inventory\".format(operation=operation, provider=provider_name, validation=details)\n else:\n message = \"Successful {operation} of {provider} provider. Authentication: {validation}.\".format(operation=operation, provider=provider_name, validation=details)\n elif result == \"Timed out\":\n message = \"Provider {provider} validation after {operation} timed out. Authentication: {validation}\".format(operation=operation, provider=provider_name, validation=details)\n return dict(\n provider_id=provider_id,\n changed=self.changed,\n msg=message,\n updates=updates\n )", "def update(identity_url, subject, content):\n return Client.get_client().update(identity_url, subject, content)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the specified network source.
def update_network_source(self, network_source_id, update_network_source_details, **kwargs): resource_path = "/networkSources/{networkSourceId}" method = "PUT" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "update_network_source got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "networkSourceId": network_source_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_network_source_details, response_type="NetworkSources") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_network_source_details, response_type="NetworkSources")
[ "def update_dataset_source(self, source=DATASET_SOURCES[0]):\n self.datum.source = source", "def update_state(self,\n source_id: str,\n serial_num: int,\n state: int,\n last_received: datetime.datetime\n ):\n self._sources[source_id] = Source(\n id_=source_id,\n serial_num=serial_num,\n state=state,\n last_received=last_received,\n )", "def update_sources(self, *args, **kwargs):\n tasks.update_sources()\n return Response({})", "def SetSource(self, source):\r\n self._default_params['source'] = source", "def set_source(self, source):\n self.data['source'] = source", "def _update_sources(self, sources):\n for source in sources:\n source.update()", "def update_sources() -> None:", "def set_source(self, source):\n self.set('config', 'source', source)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def _update_network(self, network_id, name, timestamp):\n _, state = self._create_network_nodes(name)\n self.graph_db.update_node(network_id, timestamp, state)", "def update(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def update_feed_source(request):\n try:\n feed = FeedSource.objects.get(id=request.id)\n feed.status = not feed.status\n feed.save()\n except (ValidationError, FeedSource.DoesNotExist) as e:\n exc = e\n logger(__name__, \"Could not update Feed Source due to {}\".format(str(exc)))\n errors = _get_errors(exc)\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('FAILURE'),\n details={'errors': feeds_pb2.RepeatedString(data=errors)},\n )\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('SUCCESS'),\n )", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def update_data_source(DataSourceId=None, DataSourceName=None):\n pass", "def update(src):", "def update_source(self):\n if self.verbose:\n print(\"Updating source\")\n self.source.data = self.source_data\n if self.source.selected is not None:\n self.source.selected.indices = self.selection\n for c in self.callbacks[\"update_source\"]:\n c()\n self.pending_update = False\n if self.update_buffer is not None:\n self.context.doc.add_next_tick_callback(self.update_buffer)\n self.update_buffer = None", "def update_source(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.update_source_with_http_info(id, **kwargs)\n else:\n (data) = self.update_source_with_http_info(id, **kwargs)\n return data", "def _send_update_network_request(self, db_session, network):\n LOG.debug(_('_send_update_network_request: %s'), network['id'])\n profile = n1kv_db_v2.get_network_profile(\n db_session, network[n1kv_profile.PROFILE_ID])\n body = {'name': network['name'],\n 'id': network['id'],\n 'networkDefinition': profile['name'],\n 'vlan': network[providernet.SEGMENTATION_ID]}\n n1kvclient = n1kv_client.Client()\n n1kvclient.update_network_segment(network['name'], body)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the specified tag default. If you specify that a value is required, a value is set during resource creation (either by the user creating the resource or another tag default). If no value is set, resource creation is blocked. If the `isRequired` flag is set to \"true\", the value is set during resource creation. If the `isRequired` flag is set to \"false\", the value you enter is set during resource creation.
def update_tag_default(self, tag_default_id, update_tag_default_details, **kwargs): resource_path = "/tagDefaults/{tagDefaultId}" method = "PUT" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "update_tag_default got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "tagDefaultId": tag_default_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing), "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_tag_default_details, response_type="TagDefault") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_tag_default_details, response_type="TagDefault")
[ "def _update_default(self, default_value):\n name = \"\" if not self.name else f\"{self.name!r} \"\n msg_prefix = f\"Default value of Input {name}\"\n if not self._is_primitive_type and default_value is not None:\n msg = f\"{msg_prefix}cannot be set: Non-primitive type Input has no default value.\"\n raise UserErrorException(msg)\n if isinstance(default_value, float) and not math.isfinite(default_value):\n # Since nan/inf cannot be stored in the backend, just ignore them.\n # logger.warning(\"Float default value %r is not allowed, ignored.\" % default_value)\n return\n # pylint: disable=pointless-string-statement\n \"\"\"Update provided default values.\n Here we need to make sure the type of default value is allowed or it could be parsed..\n \"\"\"\n if default_value is not None:\n if type(default_value) not in IOConstants.PRIMITIVE_TYPE_2_STR:\n msg = (\n f\"{msg_prefix}cannot be set: type must be one of \"\n f\"{list(IOConstants.PRIMITIVE_TYPE_2_STR.values())}, got '{type(default_value)}'.\"\n )\n raise UserErrorException(msg)\n\n if not isinstance(default_value, self._allowed_types):\n try:\n default_value = self._parse(default_value)\n # return original validation exception which is custom defined if raised by self._parse\n except ValidationException as e:\n raise e\n except Exception as e:\n msg = f\"{msg_prefix}cannot be parsed, got '{default_value}', type = {type(default_value)!r}.\"\n raise UserErrorException(msg) from e\n self.default = default_value", "def set_default_value(structure_items, structure_item_name, default_value):\n si = structure_items.get(structure_item_name, None)\n if si and si.content:\n si.content = default_value\n si.scores = [get_pay_money_probability(structure_items)]", "def SetDefaultVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _update_annotation_with_default(anno, name, default):\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation.name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_primitive_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n return complete_annotation", "def delete_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_request_id\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing),\n \"if-match\": 
kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)", "def _update_annotation_with_default(anno, name, default):\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation.name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_parameter_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n return complete_annotation", "def create_tag_default(self, create_tag_default_details, **kwargs):\n resource_path = \"/tagDefaults\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_default_details,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_default_details,\n response_type=\"TagDefault\")", "def default_value(self, value: Any) -> None:\n self.sdc_resource.set_input_default_value(self, value)\n self._default_value = value", "def default_tags(self, default_tags):\n\n self._default_tags = default_tags", "def SetDefault(self, parser, default):\n flag = self.__GetFlag(parser)\n if flag:\n kwargs = {flag.dest: default}\n parser.set_defaults(**kwargs)", "def default(self, default):\n self._default = default", "def _update_annotation_with_default(\n anno: Union[Annotation, Input, Output], name: str, default: Any\n ) -> Union[Annotation, Input, Output]:\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation._port_name = 
name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_primitive_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n if isinstance(complete_annotation, Output) and default is not None:\n msg = (\n f\"Default value of Output {complete_annotation._port_name!r} cannot be set:\"\n f\"Output has no default value.\"\n )\n raise UserErrorException(msg)\n return complete_annotation", "def default(self, default):\n\n self._default = default", "def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)", "def default_value(self, default_value):\n \n self._default_value = default_value", "def default_value(self, default_value):\n self._default_value = default_value", "def with_default(self, default):\n self.default = default\n return self", "def setdefault_attribute(self, name, default=None):\n self.attrs.setdefault(name, default)\n return self", "def set_boolean_default_value(self, value=None):\n raise NotImplementedError(\n 'operation set_boolean_default_value(...) not yet implemented')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the capabilities of the specified user.
def update_user_capabilities(self, user_id, update_user_capabilities_details, **kwargs): resource_path = "/users/{userId}/capabilities" method = "PUT" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "if_match" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "update_user_capabilities got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "if-match": kwargs.get("if_match", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_user_capabilities_details, response_type="User") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=update_user_capabilities_details, response_type="User")
[ "def update_service_capabilities(self, capabilities):\n self.last_capabilities = capabilities", "def UpdateCapability(self, session, name, **kwargs):\n CController._capabilityModel.Update(name, **kwargs)\n return True, True", "def update_user(AccountId=None, UserId=None, LicenseType=None):\n pass", "def getCapabilities4User(session_key, user=None):\n\n roles = []\n capabilities = []\n\n # Get user info\n if user is not None:\n logger.debug('Retrieving role(s) for current user: %s', user)\n userEntities = entity.getEntities('authentication/users/%s' % user, count=-1, sessionKey=session_key)\n\n for stanza, settings in userEntities.items():\n if stanza == user:\n for key, val in settings.items():\n if key == 'roles':\n logger.debug('Successfully retrieved role(s) for user: %s', user)\n roles = val\n\n # Get capabilities\n for role in roles:\n logger.debug('Retrieving capabilities for current user: %s', user)\n roleEntities = entity.getEntities('authorization/roles/%s' % role, count=-1, sessionKey=session_key)\n\n for stanza, settings in roleEntities.items():\n if stanza == role:\n for key, val in settings.items():\n if key == 'capabilities' or key == 'imported_capabilities':\n logger.debug('Successfully retrieved %s for user: %s', key, user)\n capabilities.extend(val)\n\n return capabilities", "def update_user_entitlement(self, document, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n content = self._serialize.body(document, '[JsonPatchOperation]')\n response = self._send(http_method='PATCH',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values,\n content=content,\n media_type='application/json-patch+json')\n return self._deserialize('UserEntitlementsPatchResponse', response)", "def capabilities(self, capabilities):\n \n self._capabilities = capabilities", "def set_capabilities(self, capabilities: WlSeat.capability) -> None:\n lib.wlr_seat_set_capabilities(self._ptr, capabilities)", "def capabilities(self, capabilities):\n\n self._capabilities = capabilities", "def update_caps(self, caps, source):\n return ObjectCapabilities.update_capabilities(self, caps, source)", "def update_user(self, service, user, generation=None, client_state=None):", "def update_user(self, data):\n self.login()\n patch_url = \"/sobjects/User/%s\" % globals()[self.username][\"user_id\"]\n result = self.patch(patch_url, data)\n self.result = result", "def user_capacity(self, user_capacity: SmartSsdUserCapacity):\n\n self._user_capacity = user_capacity", "def update_capabilities(self):\n LOG.debug((\"Store %s doesn't support updating dynamic \"\n \"storage capabilities. Please overwrite \"\n \"'update_capabilities' method of the store to \"\n \"implement updating logics if needed.\") %\n reflection.get_class_name(self))", "def update_user(name, jobs, rep):\n DB_CURSOR.execute('UPDATE users SET jobs = ?, reputation = ? 
WHERE username = ?', [jobs, rep, name])\n DB_CONN.commit()", "def set_api_permissions_for_user(user):\n models = [UserProfile, XForm, MergedXForm, Project, Team, OrganizationProfile, Note]\n for model in models:\n for perm in get_perms_for_model(model):\n assign_perm(f\"{perm.content_type.app_label}.{perm.codename}\", user)", "def set_oplevel(self, user, oplevel):\n\t\tuser = user.lower()\n\t\tif user not in self.users.keys():\n\t\t\traise KeyError(\"That user does not exist\")\n\t\tself.users[user][1] = oplevel", "def set_capabilities(self, *dynamic_capabilites):\n for cap in dynamic_capabilites:\n self._capabilities |= int(cap)", "def update_share_server_capabilities(self, share_servers, capabilities,\n value=False):\n share_servers = [server.strip() for server in share_servers.split(\",\")]\n capabilities = [cap.strip() for cap in capabilities.split(\",\")]\n supported_capabilities = ['security_service_update_support',\n 'network_allocation_update_support']\n\n values = dict()\n for capability in capabilities:\n if capability not in supported_capabilities:\n print(\"One or more capabilities are invalid for this \"\n \"operation. The supported capability(ies) is(are) %s.\"\n % supported_capabilities)\n sys.exit(1)\n values[capability] = value\n\n ctxt = context.get_admin_context()\n db.share_servers_update(ctxt, share_servers, values)\n print(\"The capability(ies) %s of the following share server(s)\"\n \" %s was(were) updated to %s.\" %\n (capabilities, share_servers, value))", "def update_any(self, user, session):\n\n user_serialized = {\n \"username\": user.username,\n \"email\": user.username,\n \"name_last\": user.name_last,\n \"name_first\": user.name_first,\n \"phone\": user.phone,\n \"address\": user.address,\n \"profile_picture_path\": user.profile_picture_path,\n \"is_enabled\": user.is_enabled\n }\n\n # Updates user in db\n row_count = session \\\n .query(AdvitoUser) \\\n .filter(AdvitoUser.id == user.id) \\\n .update(user_serialized)\n\n # Validates that a change occurred\n if row_count == 0:\n raise NotFoundError(\"Could not find user with specified id \" + str(user.id))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the classroom that has the given classroomId. Otherwise return None
def getClassroomById(classroomId): for classroom in classroomEntities: if classroom["classroomId"] == classroomId: return classroom.copy() return None
[ "def find_by_room_id(self, room_id: str) -> Optional[Room]:\n return next((room for room in self if room.uid == room_id), None)", "def find_class(self, class_id):\n for clas in self.classes:\n if clas.id == class_id:\n return clas\n return None", "def get_related_classroom_id(self):\n\n # The video ID in the URL is mandatory.\n return self.kwargs.get(\"classroom_id\")", "def find_by_client_id(self, client_id: str) -> Optional[Room]:\n try:\n return self.client_to_room[client_id]\n except KeyError:\n return None", "def get_room(self, name=None, id=None):\n \n if(name):\n return self.rooms[name] if name in self.rooms else None\n if(id):\n return next((v for (k,v) in self.rooms.items() if v.id == id), None)\n return None", "def getRoomById(self, id):\n for room in self.rooms:\n if room.id == id:\n return room\n\n return None", "def find_general_class(self, class_id):\n for class_ in my_classes:\n if class_.class_id == class_id:\n return class_\n\n return None", "def get_room_by_id(self, id):\n if not isinstance(id, int):\n id = int(id)\n if self.rooms.has_key(id):\n return self.rooms[id]\n raise RuntimeError, \"Room not known\"", "def get_skill_class(cursor, _class):\n cursor.execute('SELECT id FROM classes WHERE temp_id = ?', (_class,))\n data = cursor.fetchone()\n try:\n return data[0]\n except TypeError:\n l.error(\"The Class {} doesn't exists.\".format(_class))", "def get_course(self, id):\n id = str(id)\n for i in range(len(self.courses)):\n if self.courses[i].id == id:\n return self.courses[i]", "def deleteClassroom(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n classroomEntities.remove(selectedClassroom)\n return True\n return False", "def get(self, cls, id):\n if cls not in classes.values():\n return None\n\n all_of_class = models.storage.all(cls)\n for item in all_of_class.values():\n if item.id == id:\n return item\n\n return None", "def get_single(self, cls_or_name, id=None, strict=False):\r\n if strict or isinstance(cls_or_name, str):\r\n results = self.stack.get(cls_or_name, [])\r\n else:\r\n results = chain(\r\n *map(\r\n itemgetter(1),\r\n filter(matching_class0(cls_or_name), self.stack.items()),\r\n )\r\n )\r\n\r\n if id:\r\n results = [obj for obj in results if obj.id == id]\r\n else:\r\n results = list(results)\r\n\r\n return None if len(results) != 1 else results[0]", "def get_room(self, name):\n for obj in self.rooms:\n if obj.get_name() == name:\n return obj\n return False", "def get_chatroom(cls, chatroom_id):\n chatroom = cls.query.join(User, Chatroom.author_id == User.id).filter(Chatroom.id == chatroom_id).first()\n return chatroom", "def find_room(uuid):\n return ROOMS.get(uuid, None)", "def get_for_type(class_, vehicle):\n Category = class_\n found = session.query(Category).filter_by(name=vehicle.get_category_id()).first()\n return found", "def get_cell(self, cell_id: str) -> Optional[Cell]:\n\n for cell in self.cells:\n if cell.id == cell_id:\n return cell\n return None", "def findCategoryById(self, id):\n for cat in self.getCategories():\n if str(cat.getId()) == str(id):\n return cat\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
store the classroom inside the classroom data list. return True if the operation is successful
def addClassroom(classroomName, capacity,location): for classroom in classroomEntities: if classroom["classroomName"] == classroomName: print("Two classrooms can not have same name") return False if classroomEntities==[]: lastSavedIdNumber = "0" else: lastSavedId=classroomEntities[-1]["classroomId"] #update classroomId as first element in classroomEntities list lastSavedIdNumber=lastSavedId[2:] numberOfDigitsInID = 3 if lastSavedIdNumber == "9" * len(lastSavedIdNumber): numberOfDigitsInID = len(lastSavedIdNumber) + 1 classroomId="CR"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,"0") # add the new Classroom newClassroom = {} newClassroom["classroomId"] = classroomId newClassroom["classroomName"] = classroomName newClassroom["capacity"] = capacity newClassroom["location"] = location classroomEntities.append(newClassroom) print(f"Class Room is added into the system, Class Room id is {classroomId}.") return True
[ "def modifyClassroom(classroomId, classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n selectedClassroom[\"classroomName\"] = classroomName\n selectedClassroom[\"capacity\"] = capacity\n selectedClassroom[\"location\"] = location\n return True\n return False", "def post(self):\n name, classroom_id = self.datum('name', 'classroom_id')\n student = models.Student.upsert(self.school_key, name)\n success = models.Classroom.assign_student(self.school_key, classroom_id, student)\n self.check(success, 406, \"Student already in classroom\")\n self.write(student)", "def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)", "def save_room(self):\n pass", "def test_update_room_classification(self):\n pass", "def commit(self):\n self.classification_time = datetime.datetime.now()\n db_dict = self.dbdict()\n #*** Write classification to database collection:\n self.clsfn.insert_one(db_dict)", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def saveAttendance(self, data_login) -> bool:\n\n attendance = self.to_mount_attendance(self, data_login)\n data = Attendance.objects.filter(id_attendance=attendance['id_attendance'])\n\n if data:\n get_ = data.first()\n\n try:\n get_.cpf = attendance.get('cpf')\n get_.session = attendance.get('session')\n get_.intent = attendance.get('intent')\n get_.confidance = attendance.get('confidance')\n get_.id_node = attendance.get('id_node')\n get_.step = attendance.get('step')\n get_.longitude = attendance.get('longitude')\n get_.latitude = attendance.get('latitude')\n get_.protocol = attendance.get('protocol')\n get_.ip = attendance.get('ip')\n get_.token = attendance.get('client_token')\n get_.id_citizen = attendance['citizen'].get('id_citizen')\n get_.save()\n return True\n\n except:\n return False\n else:\n\n try:\n data = Citizen.objects.filter(email=data_login.get('client_email')).first()\n Attendance.objects.create(\n cpf=attendance.get('cpf'),\n session=attendance.get('session'),\n intent=attendance.get('intent'),\n confidance=attendance.get('confidance'),\n id_node=attendance.get('id_node'),\n step=attendance.get('step'),\n longitude=attendance.get('longitude'),\n latitude=attendance.get('latitude'),\n protocol=attendance.get('protocol'),\n ip=attendance.get('ip'),\n token=data_login.get('client_token'),\n id_citizen=data.id_citizen)\n return True\n\n except Exception as err:\n print(err)\n return False", "def update(self, data, classification):\n pass", "def store_all_to_database(self, session):\n\n description = 'Established in 1974, JSM is a family-owned provider of quality apartments. We offer a variety of units from studios to five bedrooms with every location benefitting from our award winning amenities, responsive 24 hour maintenance, and friendly property management staff. JSM Development began in Champaign, IL, and manages roughly 1,500 apartments and 450,000 sq/ft of commercial space. JSM has been a major contributor to the development of Campustown in Champaign and the East Campus area in Urbana at the University of Illinois. 
These popular locations are now home to major national retailers such as Urban Outfitters, Chipotle, Panera, Cold Stone Creamery, and Noodles & Co.'\n\n # Insert a JSM company instance into the database\n current_company = Company(\n name='JSM',\n baseurl='https://apartments.jsmliving.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n\n # Write all queries to the database\n session.commit()", "def store(self, details):", "def deleteClassroom(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n classroomEntities.remove(selectedClassroom)\n return True\n return False", "def put(self, classroom_id: str) -> None:\n assert self.normalized_payload is not None\n classroom = self.normalized_payload['classroom_dict']\n if classroom_id != classroom.classroom_id:\n raise self.InvalidInputException(\n 'Classroom ID of the URL path argument must match with the ID '\n 'given in the classroom payload dict.'\n )\n\n classroom_config_services.update_or_create_classroom_model(classroom)\n self.render_json(self.values)", "def test_if_data_can_be_saved(self):\n object_count = Room.query.count()\n\n room = Room(name='Jinja', room_type='meeting',\n capacity=5,\n location_id=1,\n calendar_id='andela.com_3836323338323230343935@resource.calendar.google.com', # noqa: E501\n image_url=\"https://www.officelovin.com/wp-content/uploads/2016/10/andela-office-main-1.jpg\") # noqa: E501\n room.save()\n\n new_count = Room.query.count()\n\n self.assertNotEquals(object_count, new_count)\n assert object_count < new_count", "def create_room(self, room_type, room_name):\n if room_name.upper() in [room.name for room in Amity.all_rooms]:\n print(\"Sorry, Room already exists!!!\")\n else:\n mapping = {'O': OfficeSpace, 'L': LivingSpace}\n new_room = mapping[room_type.upper()](room_name.upper())\n Amity.all_rooms.append(new_room)\n if room_type.upper() == \"L\":\n Amity.living_spaces[room_name.upper()] = []\n elif room_type.upper() == \"O\":\n 
Amity.office_spaces[room_name.upper()] = []\n print(room_name.upper() + \" created successfully.\")", "def save_data(scrapper_data):\n print(f'Persisting {len(scrapper_data)} results')\n\n restaurant_persistor, city_persistor = RestaurantHelper(), CityHelper()\n\n city_persistor.insert(scrapper_data[0].city)\n city_persistor.commit()\n\n for restaurant in scrapper_data:\n restaurant_persistor.insert(restaurant)\n\n restaurant_persistor.commit()\n\n print('Done.')", "def add_class(self):\n try:\n self.db.add_class(self.subject_text.get(), self.class_text.get())\n self.rmv_windows_class_subj()\n self.show_list_us()\n except ValueError:\n messagebox.showerror(\"Warning\", \"Wrong data!\")", "def save(self, case) -> bool:\n if case:\n key = case_key(case)\n case.key = key\n self.cases[key] = case\n the_redis = DARedis()\n the_redis.set_data(self.user_cases_key, self.cases)\n return True", "def __insert(self):\n try:\n logger.debug(\"Values: \")\n logger.debug(\"\\troom_id: {}\".format(self.room_id))\n logger.debug(\"\\thost_id: {}\".format(self.host_id))\n conn = self.config.connect()\n cur = conn.cursor()\n sql = \"\"\"\n insert into room (\n room_id, host_id, room_type, country, city,\n neighborhood, address, reviews, overall_satisfaction,\n accommodates, bedrooms, bathrooms, price, deleted,\n minstay, latitude, longitude, survey_id,\n coworker_hosted, extra_host_languages, name,\n property_type, currency, rate_type, license\n )\n \"\"\"\n sql += \"\"\"\n values (%s, %s, %s, %s, %s, \n %s, %s, %s, %s,\n %s, %s, %s, %s, %s,\n %s, %s, %s, %s,\n %s, %s, %s,\n %s, %s, %s, %s\n )\"\"\"\n insert_args = (\n self.room_id, self.host_id, self.room_type, self.country, self.city,\n self.neighborhood, self.address, self.reviews, self.overall_satisfaction,\n self.accommodates, self.bedrooms, self.bathrooms, self.price, self.deleted,\n self.minstay, self.latitude, self.longitude, self.survey_id,\n self.coworker_hosted, self.extra_host_languages, self.name,\n self.property_type, self.currency, self.rate_type, self.license\n )\n cur.execute(sql, insert_args)\n cur.close()\n conn.commit()\n logger.debug(\"Room \" + str(self.room_id) + \": inserted\")\n logger.debug(\"(lat, long) = ({lat:+.5f}, {lng:+.5f})\".format(lat=self.latitude, lng=self.longitude))\n except psycopg2.IntegrityError:\n # logger.info(\"Room \" + str(self.room_id) + \": insert failed\")\n conn.rollback()\n cur.close()\n raise\n except:\n conn.rollback()\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
modify the content of an already stored classroom. return True if the operation is successful
def modifyClassroom(classroomId, classroomName, capacity,location): for classroom in classroomEntities: if classroom["classroomId"] == classroomId: selectedClassroom = classroom selectedClassroom["classroomName"] = classroomName selectedClassroom["capacity"] = capacity selectedClassroom["location"] = location return True return False
[ "def addClassroom(classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomName\"] == classroomName:\n print(\"Two classrooms can not have same name\")\n return False\n\n if classroomEntities==[]:\n lastSavedIdNumber = \"0\"\n else:\n lastSavedId=classroomEntities[-1][\"classroomId\"] #update classroomId as first element in classroomEntities list\n lastSavedIdNumber=lastSavedId[2:]\n numberOfDigitsInID = 3\n if lastSavedIdNumber == \"9\" * len(lastSavedIdNumber):\n numberOfDigitsInID = len(lastSavedIdNumber) + 1\n classroomId=\"CR\"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,\"0\")\n\n # add the new Classroom\n newClassroom = {}\n newClassroom[\"classroomId\"] = classroomId\n newClassroom[\"classroomName\"] = classroomName\n newClassroom[\"capacity\"] = capacity\n newClassroom[\"location\"] = location\n classroomEntities.append(newClassroom)\n print(f\"Class Room is added into the system, Class Room id is {classroomId}.\")\n return True", "def post(self):\n name, classroom_id = self.datum('name', 'classroom_id')\n student = models.Student.upsert(self.school_key, name)\n success = models.Classroom.assign_student(self.school_key, classroom_id, student)\n self.check(success, 406, \"Student already in classroom\")\n self.write(student)", "def do_update(self, args):\n\n if not args:\n print(\"** class name missing **\")\n return\n\n token = args.split()\n\n if token[0] not in theClasses:\n print(\"** class doesn't exist **\")\n elif len(token) == 1:\n print(\"** instance id missing **\")\n else:\n all_objs = storage.all()\n for key, val in all_objs.items():\n ob_name = val.__class__.__name__\n ob_id = val.id\n if ob_name == token[0] and ob_id == token[1].strip('\"'):\n if len(token) == 2:\n print(\"** attribute name missing **\")\n elif len(token) == 3:\n print(\"** value missing **\")\n else:\n setattr(val, token[2], token[3])\n storage.save()\n return\n print(\"** no instance found **\")", "def do_update(self, arg):\n args = arg.split()\n if len(args) == 0:\n print('** class name missing **')\n return\n elif args[0] not in self.classes:\n print(\"** class doesn't exist **\")\n return\n elif len(args) == 1:\n print('** instance id missing **')\n return\n elif len(args) == 3:\n print(\"** value missing **\")\n return\n else:\n object_name = '{}.{}'.format(args[0], args[1])\n dict_of_objects = storage.all()\n if object_name in dict_of_objects.keys():\n if len(args) == 2:\n print(\"** attribute name missing **\")\n return\n else:\n value = args[3].replace('\"', '')\n object = dict_of_objects.get(object_name)\n object.__setattr__(args[2], value)\n storage.save()\n else:\n print(\"** no instance found **\")\n return", "def update(self, data, classification):\n pass", "def put(self, classroom_id: str) -> None:\n assert self.normalized_payload is not None\n classroom = self.normalized_payload['classroom_dict']\n if classroom_id != classroom.classroom_id:\n raise self.InvalidInputException(\n 'Classroom ID of the URL path argument must match with the ID '\n 'given in the classroom payload dict.'\n )\n\n classroom_config_services.update_or_create_classroom_model(classroom)\n self.render_json(self.values)", "def test_update_room_classification(self):\n pass", "def save_room(self):\n pass", "def update(self, user):\n\n\t\tif self == user.classroom:\n\t\t\treturn\n\n\t\tself.size += user.classroom.size\n\t\tuser.set_classroom(self)", "def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n 
print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")", "def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)", "def update(self):\n\t\tglobal firebase\n\t\tjson_format = self.serialize()\n\t\tfirebase.put('/game','master',json_format)", "def Persist(self) -> bool:", "def update_has_data(self):\n self.main()", "def save(self):\n self.logger.debug(\"In save.\")\n\n if not self.is_valid():\n self.logger.error(\"Cannot save, data is invalid\")\n return False\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n success = False\n\n if self.id is None:\n # The document has not yet been saved\n prep_data = self._get_raw_doc()\n self.logger.info(\"Got the raw JSON document.\")\n\n try:\n self.logger.info(\"Attempting to save a new node.\")\n node_id = session.get_osdf().insert_node(prep_data)\n self.logger.info(\"Save for HostSeqPrep %s successful.\", node_id)\n self.logger.info(\"Setting ID for HostSeqPrep %s.\", node_id)\n\n self._set_id(node_id)\n self._version = 1\n success = True\n except Exception as insert_exception:\n self.logger.error(\"An error occurred while inserting \" + \\\n \"%s %s. Reason: %s\", __name__, self._id,\n insert_exception\n )\n else:\n prep_data = self._get_raw_doc()\n\n try:\n self.logger.info(\"Attempting to update %s with ID: %s.\", __name__, self._id)\n session.get_osdf().edit_node(prep_data)\n self.logger.info(\"Update for %s %s successful.\", __name__, self._id)\n success = True\n except Exception as edit_exception:\n self.logger.error(\"An error occurred while updating %s \" + \\\n \" %s. 
Reason: %s\", __name__, self._id,\n edit_exception\n )\n\n return success", "def save(self):\n self.logger.debug(\"In save.\")\n\n # If node previously saved, use edit_node instead since ID\n # is given (an update in a way)\n # can also use get_node to check if the node already exists\n if not self.is_valid():\n self.logger.error(\"Cannot save, data is invalid.\")\n return False\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n osdf = session.get_osdf()\n\n success = False\n\n if self._id is None:\n self.logger.info(\"About to insert a new %s OSDF node.\", __name__)\n\n # Get the JSON form of the data and load it\n self.logger.debug(\"Converting %s to parsed JSON form.\", __name__)\n data = json.loads(self.to_json())\n\n try:\n node_id = osdf.insert_node(data)\n\n self._set_id(node_id)\n self._version = 1\n success = True\n except Exception as save_exception:\n self.logger.exception(save_exception)\n self.logger.error(\"An error occurred when saving %s.\", self)\n else:\n self.logger.info(\"%s already has an ID, so we \" + \\\n \"do an update (not an insert).\", __name__)\n\n try:\n node_data = self._get_raw_doc()\n self.logger.info(\"%s already has an ID, so we do an \" + \\\n \"update (not an insert).\", __name__)\n node_id = self._id\n self.logger.debug(\"%s OSDF ID to update: %s.\", __name__, node_id)\n osdf.edit_node(node_data)\n\n node_data = osdf.get_node(node_id)\n latest_version = node_data['ver']\n\n self.logger.debug(\"The version of this %s is now: %s\",\n __name__, latest_version\n )\n self._version = latest_version\n success = True\n except Exception as update_exception:\n self.logger.exception(update_exception)\n self.logger.error(\"An error occurred when updating %s.\", self)\n\n return success", "def deleteClassroom(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n classroomEntities.remove(selectedClassroom)\n return True\n return False", "def test_update_room_attribute(self):\n pass", "def test_update_room_space(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
delete a classroom from the system. return True if the operation is successful
def deleteClassroom(classroomId): for classroom in classroomEntities: if classroom["classroomId"] == classroomId: selectedClassroom = classroom classroomEntities.remove(selectedClassroom) return True return False
[ "def delete(self, classroom_id: str) -> None:\n classroom_config_services.delete_classroom(classroom_id)\n self.render_json(self.values)", "def test_remove_classroom_specific_for_coach_pt1(self):\n self.assertTrue(self.coach1.has_perm('auth.remove_classroom', self.classrooms[0]))", "def delete_room(context):\n\n room = context.get('spark.room')\n bearer = context.get('spark.CISCO_SPARK_PLUMBERY_BOT')\n\n print(\"Deleting Cisco Spark room '{}'\".format(room))\n\n url = 'https://api.ciscospark.com/v1/rooms'\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.get(url=url, headers=headers)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n actual = False\n for item in response.json()['items']:\n\n if room in item['title']:\n print(\"- found it\")\n print(\"- DELETING IT\")\n\n url = 'https://api.ciscospark.com/v1/rooms/{}'.format(item['id'])\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.delete(url=url, headers=headers)\n\n if response.status_code != 204:\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n actual = True\n\n if actual:\n print(\"- room will be re-created in Cisco Spark\")\n else:\n print(\"- no room with this name yet\")\n\n context.set('spark.room_id', None)", "def delete_class():\n\n class_id = request.form.get('class_id')\n this_class = Class.query.get(class_id)\n\n users = this_class.users\n\n # Check to see if the user is the teacher of the class\n authorized = False\n for user in users:\n if user.is_teacher and user.user_id == session['user_id']:\n authorized = True\n\n if authorized:\n db.session.delete(this_class)\n db.session.commit()\n return \"The class, {}, has been deleted.\".format(this_class.class_name)\n\n else:\n return \"You are not authorized to make this change.\"", "def test_remove_classroom_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_classroom', self.classrooms[1]))", "def delete_room(RoomArn=None):\n pass", "def delete_room(room, reason=''):\n\n if room.custom_server:\n return\n\n def _delete_room(xmpp):\n muc = xmpp.plugin['xep_0045']\n muc.destroy(room.jid, reason=reason)\n\n current_plugin.logger.info('Deleting room %s', room.jid)\n _execute_xmpp(_delete_room)\n delete_logs(room)", "def room_delete(room_id):\n room = Room.query.get(room_id)\n if room is None:\n abort(404, 'room not found')\n\n get_db().delete(room)\n get_db().commit()\n\n return '', 204", "def delete(room_id):\n\n entry = Room.objects.filter(room_id=room_id).first()\n if entry is not None:\n entry.delete()\n\n entries = Players.objects.filter(room_id=room_id)\n if entries.count():\n entries.delete()\n\n round.dialog.delete_rounds(room_id=room_id, called_from=__path__+\":\"+utils.fname())", "def delete(self, home_id: int) -> bool:\n\n with self.get_connection() as conn:\n with conn.cursor() as cursor:\n sql = \"DELETE FROM `ab_house` WHERE `home_id`=%s;\"\n try:\n cursor.execute(sql, (home_id,))\n conn.commit()\n except Exception as ex:\n print(\n f\"There was a DB error when trying to delete home {home_id}. 
EX: {ex}\",\n flush=True,\n )\n return False\n return True", "def test_program_enrollment_delete(self):\n program_enrollment = ProgramEnrollmentFactory.create()\n assert es.search()['total'] == 1\n program_enrollment.user.delete()\n assert es.search()['total'] == 0", "def remove_student_from_classroom(student: Dict, classroom: Dict):\n classroom[\"student_list\"].remove(student)\n pass", "def test_delete_lecture(lecture_class, course, valid_datetime):\n id = lecture_class.create_lecture(course, valid_datetime)\n assert id != None\n assert lecture_class.delete_lecture()", "def _delete_course():\n raise NotImplementedError", "def delete(self):\n db.session.delete(self)\n try:\n db.session.commit()\n return True\n except Exception as error:\n db.session.rollback()\n print(error.args)\n return False", "def test_remove_students():\n classroom = setup_for_test()\n student = Student(\"Andrew Tsukuda\")\n classroom.add_student(student)\n assert len(classroom.student_dir) == 1\n assert classroom.student_dir[0].ID == 1\n classroom.remove_student(\"Andrew Tsukuda\")\n assert len(classroom.student_dir) == 0", "def do_destroy(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n del dict_objs[key]\n storage.save()\n else:\n print(\"** no instance found **\")", "def post(self, request):\n room_id = request.POST[\"room\"]\n\n if Room.objects.filter(id=room_id).exists():\n Room.objects.get(id=room_id).delete()\n\n return HttpResponseRedirect(\"/room_building_delete/\")", "def delete(self, alergen) -> bool:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
saves classroomEntities in the ClassRoomData file
def saveClassroomData(): with open("ClassRoomData.txt","wb") as classroomData: pickle.dump(classroomEntities,classroomData)
[ "def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata", "def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)", "def persist(self):\n pass", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def save_room(self):\n pass", "def save(self):\n logger.info('Saving all student information to disk...')\n self.students.to_disk()", "def addClassroom(classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomName\"] == classroomName:\n print(\"Two classrooms can not have same name\")\n return False\n\n if classroomEntities==[]:\n lastSavedIdNumber = \"0\"\n else:\n lastSavedId=classroomEntities[-1][\"classroomId\"] #update classroomId as first element in classroomEntities list\n lastSavedIdNumber=lastSavedId[2:]\n numberOfDigitsInID = 3\n if lastSavedIdNumber == \"9\" * len(lastSavedIdNumber):\n numberOfDigitsInID = len(lastSavedIdNumber) + 1\n classroomId=\"CR\"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,\"0\")\n\n # add the new Classroom\n newClassroom = {}\n newClassroom[\"classroomId\"] = classroomId\n newClassroom[\"classroomName\"] = classroomName\n newClassroom[\"capacity\"] = capacity\n newClassroom[\"location\"] = location\n classroomEntities.append(newClassroom)\n print(f\"Class Room is added into the system, Class Room id is {classroomId}.\")\n return True", "def save(self):\n # bloc_dict = self.bloc.get()\n # for apartment_id in bloc_dict.keys():\n # bloc_dict[apartment_id] = Apartment.to_dictionary(bloc_dict[apartment_id])\n #\n fp = open(\"apartments.json\", \"w\")\n fp.write(json.dumps(Bloc.to_dictionary(self.bloc)))\n fp.close()", "def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/badlymappedfinder/badlymappedfinder.joblib\",\n )", "def save_rooms(self):\n\n # save the rooms found in the file\n try:\n with open(self.file_name, 'w') as f:\n f.write(json.dumps(self.rooms))\n\n # catch exceptions in case it cannot create the file or something wrong\n # with json.dumps\n except (IOError, ValueError) as e:\n logging.error(e.strerror, extra={'engine': self.__class__.__name__, 'function': 'save_rooms'})", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/repeatsfinder/repeatsfinder.joblib\",\n )", "def persist_classes(self):\n\n self.request.session[constants.ExperimentClass] = self.ExperimentClass\n self.request.session[constants.TreatmentClass] = self.TreatmentClass\n self.request.session[constants.ParticipantClass] = self.ParticipantClass\n self.request.session[constants.MatchClass] = self.MatchClass", "def save(self):\n self.lock.acquire()\n try:\n self.xml.set(\"name\",self.name)\n self.xml.set(\"room\",self.room)\n self.xml.set(\"type\",self.type)\n self.xml.find(\"address\").text = \":\".join([str(x) for x in self.address])\n if self.pos is not None:\n self.xml.find(\"pos\").text = \" \".join([str(x) for x in self.pos])\n self.xml.find(\"icon\").text = self.icon\n \n finally:\n self.lock.release()\n \n 
self.house.save_devices()", "def save_class_dict(args, dict_class):\n dict_path = os.path.join(args.embedding_path, args.dataset + DICT_CLASS)\n dict_class = {v: k for k, v in dict_class.items()}\n with open(dict_path, \"w\") as fp:\n json.dump(dict_class, fp)", "def save_as(module, file):\n\n try:\n airshots_cleanup(module)\n\n # TODO Reflecting the mapping instead of merging might make the code more efficient\n\n items = get_many(module) # temporarily stores the saved items\n\n module.session.close()\n module.engine.dispose()\n\n new_data_management(module, file)\n\n for item in get_many(module): # overwrites the selected file\n module.session.delete(item)\n\n items = [module.session.merge(item) for item in items] # conforms the stored items to the new session\n module.session.add_all(items)\n\n module.session.commit()\n\n except AttributeError:\n create_data_management(module)\n save_as(module, file)", "def save(self):\n\n self.__persistence_provider.set(self.__board_id, self.json())", "def save(self, data):\n raise NotImplementedError", "def save_annotations(self):\n r = requests.get(\n f'{self.api_host}/v1/entity-annotations?'\n f'annotation_type=Source reliability (binary)&size=100',\n headers=self.get_request_headers()\n )\n\n entity_annotations = r.json().get('entity_annotations')\n\n for annotation in entity_annotations:\n annotation_id = annotation.get('entity_id')\n with open(\n f'{self.data_folder}/annotations/{annotation_id}.json',\n 'w'\n ) as f:\n json.dump(annotation, f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the environment variable TERM is unset, try `fallback` if it is not empty. vt100 is a popular terminal supporting ANSI X3.64.
def load_terminfo(terminal_name=None, fallback='vt100'):
    terminal_name = os.getenv('TERM')
    if not terminal_name:
        if not fallback:
            raise TerminfoError('Environment variable TERM is unset and no fallback was requested')
        else:
            terminal_name = fallback
    if os.getenv('TERMINFO'):
        # from man terminfo(5):
        # if the environment variable TERMINFO is set,
        # only that directory is searched
        terminfo_locations = [os.getenv('TERMINFO')]
    else:
        terminfo_locations = []
        # from most to least important
        if os.getenv('TERMINFO_DIRS'):
            for i in os.getenv('TERMINFO_DIRS').split(':'):
                # from man terminfo(5)
                # An empty directory name is interpreted as /usr/share/terminfo.
                terminfo_locations.append(i or '/usr/share/terminfo')
        terminfo_locations += [
            os.path.expanduser('~/.terminfo'),
            '/etc/terminfo',
            '/usr/local/ncurses/share/terminfo',
            '/lib/terminfo',
            '/usr/share/terminfo'
        ]
    # remove duplicates preserving order
    terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
    terminfo_path = None
    for dirpath in terminfo_locations:
        path = os.path.join(dirpath, terminal_name[0], terminal_name)
        if os.path.exists(path):
            terminfo_path = path
            break
    if not path:
        raise TerminfoError("Couldn't find a terminfo file for terminal '%s'" % terminal_name)
    from terminfo_index import BOOLEAN_CAPABILITIES, NUMBER_CAPABILITIES, STRING_CAPABILITIES
    data = open(terminfo_path, 'rb').read()
    # header (see man term(5), STORAGE FORMAT)
    header = struct.unpack('<hhhhhh', data[:12])  # 2 bytes == 1 short integer
    magic_number = header[0]   # the magic number (octal 0432)
    size_names = header[1]     # the size, in bytes, of the names section
    size_booleans = header[2]  # the number of bytes in the boolean section
    num_numbers = header[3]    # the number of short integers in the numbers section
    num_offsets = header[4]    # the number of offsets (short integers) in the strings section
    size_strings = header[5]   # the size, in bytes, of the string table
    if magic_number != 0o432:
        raise TerminfoError('Bad magic number')
    # sections indexes
    idx_section_names = 12
    idx_section_booleans = idx_section_names + size_names
    idx_section_numbers = idx_section_booleans + size_booleans
    if idx_section_numbers % 2 != 0:
        idx_section_numbers += 1  # must start on an even byte
    idx_section_strings = idx_section_numbers + 2 * num_numbers
    idx_section_string_table = idx_section_strings + 2 * num_offsets
    # terminal names
    terminal_names = data[idx_section_names:idx_section_booleans].decode('ascii')
    terminal_names = terminal_names[:-1].split('|')  # remove ASCII NUL and split
    terminfo = Terminfo(terminal_names[0], terminal_names[1:])
    # booleans
    for i, idx in enumerate(range(idx_section_booleans, idx_section_booleans + size_booleans)):
        cap = BooleanCapability(*BOOLEAN_CAPABILITIES[i], value=data[i] == b'\x00')
        terminfo.booleans[cap.variable] = cap
    # numbers
    numbers = struct.unpack('<'+'h' * num_numbers, data[idx_section_numbers:idx_section_strings])
    for i,strnum in enumerate(numbers):
        cap = NumberCapability(*NUMBER_CAPABILITIES[i], value=strnum)
        terminfo.numbers[cap.variable] = cap
    # strings
    offsets = struct.unpack('<'+'h' * num_offsets, data[idx_section_strings:idx_section_string_table])
    idx = 0
    for offset in offsets:
        k = 0
        string = []
        while True and offset != -1:
            char = data[idx_section_string_table + offset + k:idx_section_string_table + offset + k + 1]
            if char == b'\x00':
                break
            string.append(char.decode('iso-8859-1'))
            k += 1
        string = u''.join(string)
        cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
        terminfo.strings[cap.variable] = cap
        idx += 1
    terminfo._reset_index()
    return terminfo
[ "def TerminalSupportsAnsiColors():\n return (sys.stdout.isatty() and sys.platform[:3] != \"win\")", "def get_term_colors():\n term = getenv('TERM')\n if not is_term() or not term:\n return 1\n if term in ('xterm-color', 'ansi', 'screen'):\n return 16\n if term in ('xterm-256color'):\n return 256\n return 1", "def term_support_color():\n return OS_VERSION[0] == \"Linux\" or OS_VERSION[0] == \"Darwin\"", "def initialize_console():\n if os.name == 'nt':\n if sys.stdout.encoding != 'utf-8':\n sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'replace')\n print('Reconfigured stdout to use utf-8 encoding.')\n if sys.stderr.encoding != 'utf-8':\n sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'replace')\n print('Reconfigured stderr to use utf-8 encoding.')", "def set_terminal_encoding(encoding='utf_8'):\n sys.stdin = codecs.getreader(encoding)(sys.stdin)\n sys.stdout = codecs.getwriter(encoding)(sys.stdout)\n sys.stderr = codecs.getwriter(encoding)(sys.stderr)", "def set_encoding(self) -> None:\n\n self.__session.env['ENCODING'] = 'cp437'\n term = self.__session.env.get('TERM', '')\n\n logging.info('set encoding Term: ' + self.to_str(term))\n\n local = self.__option_parser.check_local_option\n remote = self.__option_parser.check_remote_option\n\n if local(BINARY) and remote(BINARY):\n if self.to_str(term) != 'ansi':\n self.__session.env['ENCODING'] = 'utf8'\n else:\n self.__session.env['ENCODING'] = 'cp437'\n else:\n # Default to cp437 or maybe just ascii\n self.__session.env['ENCODING'] = 'cp437'", "def reset_term_colors():\n sys.stdout.write(ENDC)", "def terminal_supports_color():\n plat = sys.platform\n supported_platform = plat != \"Pocket PC\" and (\n plat != \"win32\" or \"ANSICON\" in os.environ\n )\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True", "def preferredRenderer(*args, fallback: Union[AnyStr, bool]=\"\", makeCurrent: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def get_terminal_size(fallback=(80, 24)):\n # columns, lines are the working values\n try:\n columns = int(os.environ['COLUMNS'])\n except (KeyError, ValueError):\n columns = 0\n\n try:\n lines = int(os.environ['LINES'])\n except (KeyError, ValueError):\n lines = 0\n\n # only query if necessary\n if columns <= 0 or lines <= 0:\n try:\n size = os.get_terminal_size(sys.__stdout__.fileno())\n except (AttributeError, ValueError, OSError):\n # stdout is None, closed, detached, or not a terminal, or\n # os.get_terminal_size() is unsupported\n size = os.terminal_size(fallback)\n if columns <= 0:\n columns = size.columns\n if lines <= 0:\n lines = size.lines\n\n return os.terminal_size((columns, lines))", "def terminal(self, cmd):\n term = os.environ.get(\"XTERM\", \"\")\n if (os.system(\"which %s &>/dev/null\" % term) != 0):\n for term in (\"terminal\", \"konsole\", \"xterm\", \"rxvt\", \"urxvt\"):\n if (os.system(\"which %s &>/dev/null\" % term) != 0):\n term = None\n else:\n break\n\n assert term, _(\"No terminal emulator found\")\n if (term == \"terminal\"):\n term += \" -x \"\n else:\n term += \" -e \"\n\n plog(\"TERMINAL: %s\" % cmd)\n self.process = Popen(term + cmd, shell=True)\n plog(self.process.communicate()[0])\n self.process = None\n assert self.interrupt == None", "def _get_terminal_exec(self):\n\n terminal = None\n\n try:\n with open(CONFIG_FILE_PATH) as conffile:\n config = yaml.load(conffile, yaml.SafeLoader)\n terminal = 
config.get('terminal', None)\n except yaml.YAMLError:\n print(\"Nautiterm: invalid configuration file at {path}, falling back\" +\n \" to {d}\".format(path=CONFIG_FILE_PATH, d=DEFAULT_TERMINAL_EXEC),\n file=sys.stderr)\n except IOError as ioe:\n # catch-all for permission errors and file not founds to be compatible\n # with Python 2 which doesn't have FileNotFoundError or PermissionError\n pass\n\n if not terminal:\n terminal = DEFAULT_TERMINAL_EXEC\n\n return terminal", "def supports_color():\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n return supported_platform and is_a_tty", "def _term_handler(text: str or None, input_type='pyku', output_type='ANSI'):\n return text", "def _default_color_enabled() -> bool:\n import platform\n\n # If we're not attached to a terminal, go with no-color.\n if not sys.__stdout__.isatty():\n return False\n\n # On windows, try to enable ANSI color mode.\n if platform.system() == 'Windows':\n return _windows_enable_color()\n\n # We seem to be a terminal with color support; let's do it!\n return True", "def test_count_ansi_colors_hardcoded(self):\n for term_name, count in (\n ('xterm-256color', 256),\n ('xterm-16color', 16),\n ('xterm', 8),\n ('screen-256color', 256),\n ('screen-16color', 16),\n ('screen', 8),\n ('unknown', 0),\n ):\n self.assertEqual(count, tty.count_ansi_colors(term_name))", "def test_color_auto_is_true_for_tty():\n with _mock_isatty(True), _mock_windows_color(True):\n formatter = base.BaseFormatter(options(color=\"auto\"))\n assert formatter.color is True", "def test_term_chars_default(self, instrument):\n assert instrument.term_chars == b'\\r'", "def defaultProcessOutputEncodingDecider(context, executable, **forfutureuse):\n\treturn __DEFAULT_PROCESS_ENCODING # stdout encoding will be None unless in a terminal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function will create a foreign table under the existing dummy schema.
def create_foreign_table(server, db_name, schema_name, fsrv_name,
                         foreign_table_name):
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'])
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute(
            "CREATE FOREIGN TABLE " + schema_name + "." +
            foreign_table_name + "(emp_name text NULL) SERVER %s" % fsrv_name)
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
        # Get 'oid' from newly created foreign table
        pg_cursor.execute(
            "SELECT ftrelid FROM pg_foreign_table WHERE ftserver = "
            "(SELECT oid FROM pg_foreign_server WHERE srvname = '%s') ORDER BY "
            "ftrelid ASC limit 1" % fsrv_name)
        oid = pg_cursor.fetchone()
        ft_id = ''
        if oid:
            ft_id = oid[0]
        connection.close()
        return ft_id
    except Exception:
        traceback.print_exc(file=sys.stderr)
[ "def create_local_table(self, cur, schema, new_table, foreign_table):\n\n stmt = (\"create table {}.{} (like {}.{} including all);\"\n .format(self.quote_ident(schema), self.quote_ident(new_table),\n self.quote_ident(schema),\n self.quote_ident(foreign_table)))\n\n if self.verbose:\n print(stmt)\n\n cur.execute(stmt)", "def create_table(self):\n pass", "def create_tables():\n db.create_all()", "def _buildFakeFKTable(cls, fakeTableName):\n countCols = 0\n attrs = {}\n for col in cls._imdbpySchema.cols:\n countCols += 1\n if col.name == 'id':\n continue\n if not col.foreignKey:\n # A non-foreign key column - add it as usual.\n attrs[col.name] = MAP_COLS[col.kind](**col.params)\n continue\n # XXX: Foreign Keys pointing to TableName.ColName not yet supported.\n thisColName = col.name\n if thisColName.endswith('ID'):\n thisColName = thisColName[:-2]\n\n fks = col.foreignKey.split('.', 1)\n foreignTableName = fks[0]\n if len(fks) == 2:\n foreignColName = fks[1]\n else:\n foreignColName = 'id'\n # Unused...\n #fkName = 'fk_%s_%s_%d' % (foreignTableName, foreignColName,\n # countCols)\n # Create a Foreign Key column, with the correct references.\n fk = ForeignKey(foreignTableName, name=thisColName, default=None)\n attrs[thisColName] = fk\n # Build a _NEW_ SQLObject subclass, with foreign keys, if needed.\n newcls = type(fakeTableName, (SQLObject,), attrs)\n return newcls", "def create_tables():\n db.create_all()", "def create_example_test_table(conn):\n execute_sql_script(conn, \"06_create_example_test_table.sql\")", "def createTable(ifNotExists=False, createJoinTables=True,\n createIndexes=True, connection=None):", "def create_schema(db):\n execute_sql('create_tables.sql', db)", "def create_table(self):\n return None", "def create_schema(self):\n\t\tself.pg_eng.create_schema()\n\t\tself.logger.info(\"Importing mysql schema\")\n\t\tself.pg_eng.build_tab_ddl()\n\t\tself.pg_eng.create_tables()", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def create_schema(self):\n\n self.assert_is_test_env()\n self.db.drop_all()\n self.db.create_all()", "def create_questions_table(conn):\n execute_sql_script(conn, \"04_create_questions_table.sql\")", "def create_table(self, create_table_sql):\n try:\n c = self.conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)", "def create_tables():\n for ddl in create_table.ddl_queries:\n with get_db_cursor() as cur:\n cur.execute(ddl)", "def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")", "def create_table(self, create_table_sql):\n connection = self.__create_connection()\n try:\n c = connection.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)", "def populate_table_from_foreign_table(self, cur, schema, table,\n foreign_table):\n\n stmt = (\"insert into {}.{} select * from {}.{};\"\n 
.format(self.quote_ident(schema), self.quote_ident(table),\n self.quote_ident(schema),\n self.quote_ident(foreign_table)))\n\n if self.verbose:\n print(stmt)\n\n cur.execute(stmt)", "def test_foreign_keys(self):\n Test = db.mock_model(model_name='Test', db_table='test5a',\n db_tablespace='', pk_field_name='ID',\n pk_field_type=models.AutoField, pk_field_args=[])\n cursor = connection.cursor()\n db.start_transaction()\n db.create_table(\"test5a\", [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))])\n db.create_table(\"test5b\", [\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('UNIQUE', models.ForeignKey(Test)),\n ])\n db.execute_deferred_sql()\n db.rollback_transaction()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function will verify the current foreign table.
def verify_foreign_table(server, db_name, fsrv_name):
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'])
        pg_cursor = connection.cursor()
        pg_cursor.execute(
            "SELECT ftrelid FROM pg_foreign_table WHERE ftserver = "
            "(SELECT oid FROM pg_foreign_server WHERE srvname = '%s') ORDER BY "
            "ftrelid ASC limit 1" % fsrv_name)
        fts = pg_cursor.fetchone()
        connection.close()
        return fts
    except Exception:
        traceback.print_exc(file=sys.stderr)
[ "def foreign_key_check(self):\n # MyRocks doesn't support foreign key\n if self.is_myrocks_table:\n log.info(\n \"SKip foreign key check because MyRocks doesn't support \" \"this yet\"\n )\n return True\n foreign_keys = self.query(\n sql.foreign_key_cnt,\n (\n self.table_name,\n self._current_db,\n self.table_name,\n self._current_db,\n ),\n )\n if foreign_keys:\n fk = \"CONSTRAINT `{}` FOREIGN KEY (`{}`) REFERENCES `{}` (`{}`)\".format(\n foreign_keys[0][\"constraint_name\"],\n foreign_keys[0][\"col_name\"],\n foreign_keys[0][\"ref_tab\"],\n foreign_keys[0][\"ref_col_name\"],\n )\n raise OSCError(\n \"FOREIGN_KEY_FOUND\",\n {\"db\": self._current_db, \"table\": self.table_name, \"fk\": fk},\n )", "def verify_foreign_table(server, db_name, fsrv_name):\n\n try:\n connection = get_db_connection(db_name,\n server['username'],\n server['db_password'],\n server['host'],\n server['port'])\n pg_cursor = connection.cursor()\n\n pg_cursor.execute(\n \"SELECT ftrelid FROM pg_catalog.pg_foreign_table WHERE ftserver = \"\n \"(SELECT oid FROM pg_catalog.pg_foreign_server \"\n \"WHERE srvname = '%s') \"\n \"ORDER BY ftrelid ASC limit 1\" % fsrv_name)\n fts = pg_cursor.fetchone()\n connection.close()\n return fts\n except Exception:\n traceback.print_exc(file=sys.stderr)", "def verify_table(self):\n metadata = MetaData()\n metadata.reflect(bind = StatusSource.engine)\n mine = str(self.table.columns)\n verified = str(metadata.tables[self.tablename].columns)\n if mine != verified:\n raise DbException(\"Table '%s' in the database has schema %s whereas the query's schema is %s\" % (self.tablename, verified, mine))", "def verify_table(self):\r\n metadata = MetaData()\r\n metadata.reflect(bind = DbInsertStatusHandler.engine)\r\n mine = str(self.table.columns)\r\n verified = str(metadata.tables[self.tablename].columns)\r\n if mine != verified:\r\n raise DbException(\"Table '%s' in the database has schema %s whereas the query's schema is %s\" % (self.tablename, verified, mine))", "def can_introspect_foreign_keys(self):\n return self._mysql_storage_engine == 'InnoDB'", "def check_foreign_key_exists(self, table_name, column_name, referenced_table, referenced_column):\n ans = self.execute(self.commands.foreign_key_exists(self.db.name, table_name, column_name, referenced_table, referenced_column))\n if not ans:\n return False\n return True", "def _validate_table(self, table):\n\n pass", "def verify_table_exists(self):\n if ModelBase.model_data[self.child_class].get('table_exists'): return True\n if ModelBase.db_interface.does_table_exist(self.get_tablename()):\n ModelBase.model_data[self.child_class]['table_exists'] = True\n return True\n col_dict = {}\n for col in self.get_columns():\n col_dict[col] = getattr(self.child_class,col)[2]\n ModelBase.db_interface.create_table(self.get_tablename(),col_dict)\n ModelBase.model_data[self.child_class]['table_exists'] = True\n return True", "def check_constraints(self, table_names=None):\n ref_query = \"\"\"\n SELECT REFERRING.`{0}`, REFERRING.`{1}` FROM `{2}` as REFERRING\n LEFT JOIN `{3}` as REFERRED\n ON (REFERRING.`{4}` = REFERRED.`{5}`)\n WHERE REFERRING.`{6}` IS NOT NULL AND REFERRED.`{7}` IS NULL\"\"\"\n cursor = self.cursor()\n if table_names is None:\n table_names = self.introspection.table_names(cursor)\n for table_name in table_names:\n primary_key_column_name = \\\n self.introspection.get_primary_key_column(cursor, table_name)\n if not primary_key_column_name:\n continue\n key_columns = self.introspection.get_key_columns(cursor,\n table_name)\n for column_name, 
referenced_table_name, referenced_column_name \\\n in key_columns:\n cursor.execute(ref_query.format(primary_key_column_name,\n column_name, table_name,\n referenced_table_name,\n column_name,\n referenced_column_name,\n column_name,\n referenced_column_name))\n for bad_row in cursor.fetchall():\n msg = (\"The row in table '{0}' with primary key '{1}' has \"\n \"an invalid foreign key: {2}.{3} contains a value \"\n \"'{4}' that does not have a corresponding value in \"\n \"{5}.{6}.\".format(table_name, bad_row[0],\n table_name, column_name,\n bad_row[1], referenced_table_name,\n referenced_column_name))\n raise utils.IntegrityError(msg)", "def checkForeignKeys(self, engine: Engine) -> None:\n missing = (sqlalchemy_utils.functions\n .non_indexed_foreign_keys(self._metadata, engine=engine))\n\n for table, keys in missing.items():\n for key in keys:\n logger.warning(\"Missing index on ForeignKey %s\" % key.columns)", "def self_referential(self):\n return self.table_name == self.fk_table_name", "def _is_foreign_key(self, key):\n return self._in_keys(key, self._foreign_keys)", "def do_db_check(self):", "def check_tables(self):\n self.check_table('users')\n self.check_table('menu')\n self.check_table('orders')", "def validate(self):\n super_message = super().validate()\n if super_message is not None:\n return super_message\n if not self._tables:\n return\n with sqlite3.connect(str(self.path)) as conn:\n result = conn.execute(\"select name from sqlite_master where type='table'\")\n tables = {x[0] for x in result}\n\n if self._tables - tables:\n return f\"found {tables}, expected {self._tables}\"", "def test_foreign_keys(self):\n Test = db.mock_model(model_name='Test', db_table='test5a',\n db_tablespace='', pk_field_name='ID',\n pk_field_type=models.AutoField, pk_field_args=[])\n cursor = connection.cursor()\n db.start_transaction()\n db.create_table(\"test5a\", [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))])\n db.create_table(\"test5b\", [\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('UNIQUE', models.ForeignKey(Test)),\n ])\n db.execute_deferred_sql()\n db.rollback_transaction()", "def test_(schema_name, schema, schemas, expected_foreign_keys):\n returned_foreign_keys = foreign_key._get_schema_foreign_keys(\n schemas, schema_name, schema\n )\n\n assert list(returned_foreign_keys) == expected_foreign_keys", "def table_check(tablename, path):\n instance = arcno(path)\n tablelist = [i for i,j in instance.actual_list.items()]\n return True if tablename in tablelist else False", "def enforce_foreign_keys(self):\n with self._get_db_connection() as conn:\n try:\n c = conn.cursor()\n c.execute('PRAGMA foreign_keys=ON')\n except Exception:\n conn.rollback()\n raise Exception(sys.exc_info())\n else:\n conn.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the partition sum without the electronic contribution. v = vibrational frequency in m^1, m = mass in kg, I = moment of inertia, either a number or a list of three [kg m^2], V = volume in m^3, sym = number of similar rotation axes.
def partition(v,m,I,V,sym):
    T = s.Symbol("T")
    return qvib(v) + qtrans(m,V) + qrot(I,sym)
[ "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def inertia(self):\n #vehicle mass in station coordinates\n self.m=0\n self.CoM=np.array([0,0,0])\n for mass in self.masses:\n self.m=self.m+mass.m\n self.CoM=self.CoM+mass.CoM*mass.m\n #Divide the weighted sum of center of masses by the total mass to get\n #the actual center of mass\n 
self.CoM=self.CoM/self.m\n #Now that we know where the total center of mass is, use the parallel\n #axis theorem to figure the moment of inertia of each element\n #relative to that center of mass\n #https://en.wikipedia.org/wiki/Parallel_axis_theorem#Tensor_generalization\n I=np.matrix([[0,0,0],[0,0,0],[0,0,0]])\n for mass in self.masses:\n R=mass.CoM-self.CoM\n I=I+mass.nI*mass.m+mass.m*(np.inner(R,R)*vehicle.E3-np.outer(R,R))\n self.nI=I/self.m\n return (self.m,I,self.CoM)", "def decomposition_into_s_n_irreducibles(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n if (p >0 and (p <= self.dimension())):\r\n null = nullspace(A)\r\n w3 = []\r\n for i in range(len(null[0])):\r\n w = []\r\n for j in range(len(null)):\r\n w.append(null[j][i])\r\n w3.append(w) \r\n null = w3\r\n M = np.matrix(w3, dtype= np.float64).transpose()\r\n Mi = np.linalg.pinv(M)\r\n else:\r\n if (p == 0):\r\n M = A\r\n null = []\r\n for i in range(A.shape[0]):\r\n aux = []\r\n for j in range(A.shape[1]):\r\n aux.append(M[i,j])\r\n null.append(aux)\r\n M = np.matrix(null, dtype=np.float64)\r\n Mi = M\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = columnspace(A1)\r\n w4 = []\r\n for i in range(len(col[0])):\r\n w = []\r\n for j in range(len(col)):\r\n w.append(col[j][i])\r\n w4.append(w)\r\n col = w4\r\n M1 = np.matrix(w4, dtype=np.float64).transpose()\r\n Mii = np.linalg.pinv(M1)\r\n for h in w5:\r\n p = k \r\n if (p >0 and (p <= self.dimension())):\r\n if (all(elem == 0 for elem in null[0])):\r\n l1 = 0\r\n else:\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n if (p == 0):\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n l1 = 0\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n hi = self.basis_group_oriented_p_chains(p-1) \r\n on1i = np.ones(len(list(hi.dic.keys())), dtype=np.float64) \r\n vi = P_chains([],[])\r\n vi = P_chains(list(hi.dic.keys()),on1i)\r\n v1i = permutation_in_simplex_test(vi, make_permutation(h))\r\n D1i={}\r\n c1 = 0\r\n for 
i in list(v1i.dic.keys()):\r\n c2 = 1\r\n for j in list(hi.dic.keys()):\r\n if (i == j):\r\n if (v1i.dic[i] == hi.dic[j]):\r\n D1i[c1] = c2\r\n else:\r\n D1i[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M1.shape[0]\r\n cc = M1.shape[1]\r\n Mai = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Mai[i,:] = (M1[(abs(D1i[i])-1),:]*(np.sign(D1i[i])))\r\n l2 = 0\r\n for j in range(cc):\r\n l2 = np.dot(Mii[j,:],Mai[:,j])[0,0] + l2\r\n else:\r\n l2 = 0\r\n uu.append(l1-l2) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])] = abs(round(Ip))\r\n '''Note that I am using round, only because the results obtained are \r\n not esthetics'''\r\n vec_dic[k] = D\r\n return vec_dic", "def partition_by_eigenvector(graph):\n ###TODO\n pass", "def Mdyn_virial(R_kpc, v_kms):\n M = 2.8e5 * (v_kms)**2 * R\n M10 = M / 1e10\n return M10", "def diameters(partition):\n return np.array([np.max(Euclidean()(p)) for p in partition])", "def _calc_vacancy_number(self, partitions, a, i):\n I = self.index_set()\n ia = I[a]\n vac_num = 0\n\n gamma = self._folded_ct.scaling_factors()\n g = gamma[ia]\n for b in range(self._cartan_matrix.ncols()):\n ib = I[b]\n q = partitions[b].get_num_cells_to_column(g*i, gamma[ib])\n vac_num -= self._cartan_matrix[a,b] * q / gamma[ib]\n\n return vac_num", "def volume(evas, multiplicity=None, dim=None, m=m_generic):\n if not dim:\n dim = dimension(evas, m)\n if not multiplicity:\n multiplicity = estimate_multiplicity(dim)\n evasq = [l**2 for l in evas]\n L = max(evasq)\n scaledevasq = [lamb*eps(L, m) for lamb in evasq]\n tot = sum([f0(lamb, dim) for lamb in scaledevasq])\n tot = tot*(eps(L, m)**(dim/2))\n return float((4*math.pi)**(dim/2)*tot / multiplicity)", "def _query_summed_volume(svt, diam):\n return (\n svt[diam[0]:, diam[1]:, diam[2]:] - svt[diam[0]:, diam[1]:, :-diam[2]] -\n svt[diam[0]:, :-diam[1], diam[2]:] - svt[:-diam[0], diam[1]:, diam[2]:] +\n svt[:-diam[0], :-diam[1], diam[2]:] + svt[:-diam[0], diam[1]:, :-diam[2]]\n + svt[diam[0]:, :-diam[1], :-diam[2]] -\n svt[:-diam[0], :-diam[1], :-diam[2]])", "def vert_divg_mass_bal(omega, p, dp):\n div = vert_divg(omega, p)\n return field_vert_int_bal(div, dp)", "def HarmonicOscillator(inv_mass_matrix, k=1.0, m=1.0):\n\n def potential_energy(q):\n return jnp.sum(0.5 * k * jnp.square(q[\"x\"]))\n\n def kinetic_energy(p):\n v = jnp.multiply(inv_mass_matrix, p[\"x\"])\n return jnp.sum(0.5 * jnp.dot(v, p[\"x\"]))\n\n return potential_energy, kinetic_energy", "def isentropic_expansion(self,\n P_i, #Initial pressure in Pa\n v_i, #Initial volume in M^3\n v_f,\n mass=1.e2 # Piston mass in grams\n ):\n cross_section = 1e-4 # one cm^2 in M^2\n Kg2g = 1.0e3\n N = 1000\n step = (v_f-v_i)/N\n _v = np.arange(v_i,v_f+step/2,step) # Array of N+1 volumes\n def func(P,v):\n \"\"\" Returns the derivitive of P wrt v on isentrope\"\"\"\n return -(7.0*P)/(5.0*v)\n _P = scipy.integrate.odeint(\n func, # Derivative function\n np.array([P_i]), # Initial condition\n _v) # Returns array of pressures in Pa\n _P_bar = (_P[:-1]+_P[1:])/2\n _KE = _P_bar.cumsum()*step # In Joules\n _vel = np.sqrt(2*_KE*Kg2g/mass) # In M/s\n _dvdt = _vel * cross_section # In M^3/s\n _dt = step/_dvdt\n Dt = _dt.sum() # In s\n # Now use analytic results\n E_i = self.Pv2E(P_i,v_i)\n E_f = E_i*(v_i/v_f)**.4\n return (_KE[-1],Dt,E_i-E_f)", "def primary_decomposition(self):\n k = self.base_ring()\n n = self.degree()\n if n == 0:\n return 
[]\n if not (self.is_unitary() and self.is_commutative()\n and (self._assume_associative or self.is_associative())):\n raise TypeError(\"algebra must be unitary, commutative and associative\")\n # Start with the trivial decomposition of self.\n components = [Matrix.identity(k, n)]\n for b in self.table():\n # Use the action of the basis element b to refine our\n # decomposition of self.\n components_new = []\n for c in components:\n # Compute the matrix of b on the component c, find its\n # characteristic polynomial, and factor it.\n b_c = c.solve_left(c * b)\n fact = b_c.characteristic_polynomial().factor()\n if len(fact) == 1:\n components_new.append(c)\n else:\n for f in fact:\n h, a = f\n e = h(b_c) ** a\n ker_e = e.kernel().basis_matrix()\n components_new.append(ker_e * c)\n components = components_new\n quotients = []\n for i in range(len(components)):\n I = Matrix(k, 0, n)\n for j,c in enumerate(components):\n if j != i:\n I = I.stack(c)\n quotients.append(self.quotient_map(self.ideal(I, given_by_matrix=True)))\n return quotients", "def KE(v_tot, m = 39.95):\n return 6.24E18 * 0.5 * 1.66E-27* m *(v_tot*1E5)**2", "def computeVolume(self):\n return (1 if self.clockwise else -1)*np.sum(np.linalg.det(np.dstack((self.vertices[self.faces[:,0]],self.vertices[self.faces[:,1]],self.vertices[self.faces[:,2]]))))/6", "def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs", "def mass(self):\n\t\treturn self.volume*self.density", "def valor_medio(x):\r\n\r\n N = len(x)\r\n u = 1 / N * sum(x)\r\n return u", "def trans_elec_mom(self, i, data):\n mLogger.info(\"cartesian components of electron size [Ang]\",\n extra={\"Parsed\": V.trans_elec_mom})\n j = 1\n vec = []\n while True:\n ls = data[i+j].split()\n if len(ls) == 0:\n break\n elif \"Electron size [Ang]:\" in data[i+j] and \\\n \"Cartesian components [Ang]:\" in data[i+j+1]:\n vec = extract_floats(data[i+j+1])\n break\n j += 1\n return vec" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function subscribes to the secondaryCam topic and updates its state in the global scope.
def secondayCamCallback(msg):
    global secondaryCamString
    secondaryCamString = msg.data
[ "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)", "def on_connect(self, client, userdata, flags, rc):\n print(\"connected\")\n self.client.subscribe(self.config[\"Topic\"])", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def onClientSubscribed(self, proto, topicUri):\r\n pass", "def init_subs(self, topic):\n # Init subscriber for receiving detections\n self.say_sub = rospy.Subscriber(topic, DetectionWithPose, self.callback)", "def subscribe(receiver, updateInterval=None):", "def subscribe(self, update_cb):\n self.subscribers.add(update_cb)", "def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)", "def sub_cb(topic, msg):\n\n print(\"- Message received!\")\n print(\" * %s: %s\" % (topic.decode(\"utf-8\"), msg.decode(\"utf-8\")))", "def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0", "def on_connect(client, userdata, flags, rc):\n logging.debug(\"Connected flags\"+str(flags)+\"result code \"\\\n +str(rc)+\"client1_id\")\n if rc==0:\n \n client.connected_flag=True #old clients use this\n client.bad_connection_flag=False\n if client.sub_topic!=\"\": #single topic\n logging.debug(\"subscribing \"+str(client.sub_topic))\n print(\"subscribing in on_connect\")\n topic=client.sub_topic\n if client.sub_qos!=0:\n qos=client.sub_qos\n client.subscribe(topic,qos)\n elif client.sub_topics!=\"\":\n print(\"subscribing in on_connect multiple\")\n client.subscribe(client.sub_topics)\n\n else:\n print(\"set bad connection flag\")\n client.bad_connection_flag=True #\n client.bad_count +=1\n client.connected_flag=False #", "def subscribe(self, topic, callback):\n raise NotImplementedError('You must override this')", "def _register_listener(self):\n self.dust_comm.register_listener(\"ivi_topic_out\", self.incoming_DUST_message)\n print(\"listening on topic: \", \"ivi_topic_out\")", "def on_connect(client, userdata, flags, rc):\r\n logging.debug(\"Connected flags\"+str(flags)+\"result code \"\\\r\n +str(rc)+\"client1_id\")\r\n\r\n if rc==0:\r\n client.connected_flag=True #old clients use this\r\n client.bad_connection_flag=False\r\n if client.sub_topic!=\"\": #single topic\r\n print(\"subscribing \"+str(client.sub_topic))\r\n topic=client.sub_topic\r\n if client.sub_qos==0:\r\n 
qos=client.sub_qos\r\n client.subscribe(topic,qos)\r\n elif client.sub_topics!=\"\":\r\n\r\n client.subscribe(client.sub_topics)\r\n print(\"Connected and subscribed to \",client.sub_topics)\r\n\r\n else:\r\n client.bad_connection_flag=True #\r\n client.bad_count +=1\r\n client.connected_flag=False #\r", "def camera_listener(self):\n camera_sub_cb_grp = ReentrantCallbackGroup()\n self.create_subscription(CameraMsg,\n constants.CAMERA_MSG_TOPIC,\n self.camera_callback,\n 10,\n callback_group=camera_sub_cb_grp)\n display_img_sub_cb_grp = ReentrantCallbackGroup()\n self.create_subscription(Image,\n constants.DISPLAY_MSG_TOPIC,\n self.display_callback,\n 10,\n callback_group=display_img_sub_cb_grp)", "def on_subscribe(self, mqtt_client, userdata, mid, granted_qos):\n logging.debug(\"DEBUG - subscribe ack received\")", "def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)", "def mqtt_sub_callback(self, client, userdata, message):\n\t#def mqtt_sub_callback(self, message):\n\n\t\t# Decode the message using UTF-8 and convert it\n\t\t# to 'string' datatype\n\t\tpayload = str(message.payload.decode(\"utf-8\"))\n\n\t\trospy.loginfo(\"[BRIDGE] Message Received from MQTT\")\n\n\t\t# Give the appropiate values to the contents of the message\n\t\t# that will be published to '/ros_iot_bridge/mqtt/sub'\n\t\tmsg_mqtt_sub = msgMqttSub()\n\t\tmsg_mqtt_sub.timestamp = rospy.Time.now()\n\t\tmsg_mqtt_sub.topic = message.topic\n\t\tmsg_mqtt_sub.message = payload\n\n\t\t# Publish the message\n\t\tself._handle_ros_pub.publish(msg_mqtt_sub)\n\n\t\t# Upload to Google Sheet\n\t\tret = self.update_gsheet(\"None\", True, payload)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function first determines which camera is the primary and which is the secondary. The image streams from the respective primary and secondary cameras are resized and republished.
def resizeAndRepubThread():
    # reference globals
    global primaryCamString
    global secondaryCamString
    global armCamImage
    global headCamImage
    # initialize image publishers
    primaryPub = rospy.Publisher(primaryCamRepub, Image, queue_size=1)
    secondaryPub = rospy.Publisher(secondaryCamRepub, Image, queue_size=1)
    # create CvBridge object for converting CV2 images to sensor_msgs/Image messages
    backBridge = CvBridge()
    while(True):
        primaryImage = np.zeros(shape=[512, 512, 3])
        secondaryImage = np.zeros(shape=[512, 512, 3])
        # just keep looping until we get images
        if(np.sum(headCamImage) == 0 or np.sum(armCamImage) == 0):
            rospy.loginfo("still waiting on camera images...")
            continue
        # get primary image
        if(primaryCamString == "head"):
            primaryImage = resizeImage(headCamImage, primarySize)
        elif(primaryCamString == "leftArm"):
            primaryImage = resizeImage(armCamImage, primarySize)
        elif(primaryCamString == ""):
            pass
        else:
            rospy.logerr("Invalid Option for primaryCamString recieved!")
        # get secondary image
        if(secondaryCamString == "head"):
            secondaryImage = resizeImage(headCamImage, secondarySize)
        elif(secondaryCamString == "leftArm"):
            secondaryImage = resizeImage(armCamImage, secondarySize)
        elif(secondaryCamString == ""):
            pass
        else:
            rospy.logerr("Invalid Option for secondaryCamString recieved!")
        # publish both new images
        if(np.sum(primaryImage) != 0 and np.sum(secondaryImage) != 0):
            primaryImageMessage = backBridge.cv2_to_imgmsg(primaryImage, "bgr8")
            primaryPub.publish(primaryImageMessage)
            secondaryImageMessage = backBridge.cv2_to_imgmsg(secondaryImage, "bgr8")
            secondaryPub.publish(secondaryImageMessage)
[ "def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None", "def read_cameras(self):\n for camera in self.camlist:\n image = camera.cam.read()\n if camera.vflip:\n image = cv2.flip(image, -1)\n if camera.resize_width:\n image = imutils.resize(image, width=camera.width_pixels)\n camera.cam_q.append(image)\n for detector in camera.detectors:\n self.run_detector(camera, image, detector)", "def rightCam(viewCam):\n\n pass", "def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n 
self._pilFrames[\"layered\"] = PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to update with a delay of 15\r\n self.after(15, self.update)", "def _create_in_stream(self):\n # Color Camera vid\n self.color_cam = self.createXLinkIn()\n self.color_cam.setStreamName(self.color_cam_name)\n\n # Right and Left Cameras\n self.mono_left_cam = self.createXLinkIn()\n self.mono_left_cam.setStreamName(self.left_cam_name)\n self.mono_right_cam = self.createXLinkIn()\n self.mono_right_cam.setStreamName(self.right_cam_name)", "def setup_camera(self):\n\n # camera_transform = carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15))\n camera_transform_A = carla.Transform(carla.Location(x=1.2, y=0, z=1.3), carla.Rotation(pitch=0))\n camera_transform = carla.Transform(carla.Location(x=0.2, y=-0.25, z=1.3), carla.Rotation(pitch=0))\n lidar_transform_A = carla.Transform(carla.Location(x=1.2, y=0, z=2.0), carla.Rotation(pitch=0))\n lidar_transform = carla.Transform(carla.Location(x=0.2, y=-0.25, z=2.0), carla.Rotation(pitch=0))\n self.camera = self.world.spawn_actor(self.camera_blueprint(), camera_transform, attach_to=self.car)\n self.camera_A_rgb = self.world.spawn_actor(self.camera_blueprint(), camera_transform_A, attach_to=self.car_A)\n\n camera_depth_bp = self.world.get_blueprint_library().find('sensor.camera.depth')\n camera_depth_bp.set_attribute('image_size_x', str(VIEW_WIDTH))\n camera_depth_bp.set_attribute('image_size_y', str(VIEW_HEIGHT))\n camera_depth_bp.set_attribute('fov', str(VIEW_FOV))\n self.camera_A_depth = self.world.spawn_actor(camera_depth_bp, camera_transform_A, attach_to=self.car_A)\n '''\n camera_depth_bp2 = self.world.get_blueprint_library().find('sensor.camera.depth')\n camera_depth_bp2.set_attribute('image_size_x', str(VIEW_WIDTH))\n camera_depth_bp2.set_attribute('image_size_y', str(VIEW_HEIGHT))\n camera_depth_bp2.set_attribute('fov', str(VIEW_FOV))\n self.camera_A_depth2 = self.world.spawn_actor(camera_depth_bp2, camera_transform, attach_to=self.car)\n '''\n lidar_bp1 = self.world.get_blueprint_library().find('sensor.lidar.ray_cast')\n lidar_bp1.set_attribute('range', '5000')\n lidar_bp1.set_attribute('channels', '128')\n lidar_bp1.set_attribute('rotation_frequency', '50')\n lidar_bp1.set_attribute('points_per_second', '1000000')\n self.lidar1 = self.world.spawn_actor(lidar_bp1, lidar_transform_A, attach_to = self.car_A)\n lidar_bp2 = self.world.get_blueprint_library().find('sensor.lidar.ray_cast')\n lidar_bp2.set_attribute('range', '5000')\n lidar_bp2.set_attribute('channels', '128')\n lidar_bp2.set_attribute('rotation_frequency', '50')\n lidar_bp2.set_attribute('points_per_second', '5000')\n self.lidar2 = self.world.spawn_actor(lidar_bp2, lidar_transform, attach_to = self.car)\n\n gnss_bp1 = self.world.get_blueprint_library().find('sensor.other.gnss')\n self.gnss1 = self.world.spawn_actor(gnss_bp1, camera_transform_A, attach_to = self.car_A)\n gnss_bp2 = 
self.world.get_blueprint_library().find('sensor.other.gnss')\n self.gnss2 = self.world.spawn_actor(gnss_bp2, camera_transform, attach_to = self.car)\n\n weak_self = weakref.ref(self)\n self.camera_A_rgb.listen(lambda image: self.set_rgb_image1(weak_self, image))\n # self.camera_A_depth.listen(lambda image: self.set_depth_image(weak_self, image))\n # self.camera_A_depth2.listen(lambda image: self.set_depth_image2(weak_self, image))\n self.camera.listen(lambda image: self.set_rgb_image2(weak_self, image))\n self.lidar1.listen(lambda cloud: self.set_lidar1(weak_self, cloud))\n self.lidar2.listen(lambda cloud: self.set_lidar2(weak_self, cloud))\n self.gnss1.listen(lambda gnss: self.set_gnss1(weak_self, gnss))\n self.gnss2.listen(lambda gnss: self.set_gnss2(weak_self, gnss))\n\n\n calibration = np.identity(3)\n # calibration[0, 0] = 5\n calibration[0, 2] = VIEW_WIDTH / 2.0\n calibration[1, 2] = VIEW_HEIGHT / 2.0\n calibration[0, 0] = calibration[1, 1] = VIEW_WIDTH / (2.0 * np.tan(VIEW_FOV * np.pi / 360.0))\n print(\"calibration = \")\n print(calibration)\n self.camera.calibration = calibration\n self.camera_A_rgb.calibration = calibration", "def start_cameras_and_mjpeg_servers():\n if not DEVELOP: \n\n # Stop camera triggers\n camera_trigger.stop()\n\n # Start cameras\n camera_master.set_camera_launch_param(\n frame_rate='camera_driver',\n trigger=True\n )\n camera_master.start_cameras()\n # Wait until the camera nodes are ready and then start the mjpeg servers\n while not mct_introspection.camera_nodes_ready(mode='calibration'):\n time.sleep(0.2)\n\n # Delay until all camera nodes are ready and start triggers\n time.sleep(10)\n frame_rates = file_tools.read_frame_rates()\n camera_trigger.start(frame_rates['camera_calibration'])\n\n # Start mjpeg servers\n mjpeg_servers.set_topics(['image_raw'])\n mjpeg_servers.start_servers()\n target_info = file_tools.read_target_info(config.camera_calib_target_type)\n calibrator_master.start(target_info['size'], target_info['square'])", "def primary_cam_setup(cam):\n\ttry:\n\t\tresult = True\n\t\tnodemap = cam.GetNodeMap()\n\n\t\t# Configure the camera to allow for chunk data\n\t\tresult &= configure_chunk_data(nodemap)\n\n\t\t# Setup the pixel format\n\t\tresult &= pixel_format(1, cam, 'BGR8')\n\n\t\t# Set up the primary camera output GPIO signal\n\t\tprint('\\n\\t*** CONFIGURING HARDWARE OUTPUT ***')\n\t\tcam.LineSelector.SetValue(PySpin.LineSelector_Line2)\n\t\tcam.V3_3Enable.SetValue(True)\n\t\tprint('\\t\\tCamera 1 Hardware output set to Line 2...')\n\n\t\tresult &= trigger_selector(1, cam, 'FrameStart')\n\t\tresult &= trigger_overlap(1, cam, 'ReadOut')\n\t\tresult &= configure_trigger(1, cam, 'software')\n\n\t\tprint(\"\\n\\t*** CONFIGURING CAMERA ***\")\n\t\tresult &= acquisition_mode(1, cam)\t\t\t# Continuous acquisition\n\t\tresult &= framerate(1, cam)\t\t\t\t\t# Set the framerate\n\t\tresult &= auto_exposure_mode(1, cam, 'Off') # Autoexposure = Off\n\t\tresult &= exposure_change(cam, first_exp) # Set first exposure\n\t\tresult &= auto_gain_mode(1, cam, 'Off')\t\t# Autogain = Off\n\t\tresult &= gain_change(cam, first_gain)\t # Set first gain\n\t\tprint('\\n')\n\n\texcept PySpin.SpinnakerException as ex:\n\t\tprint('Error: %s' % ex)\n\t\tresult = False\n\n\treturn result", "def _create_camera(self):\n ...", "def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store 
original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename", "def test_camera_resolutions(self):\n # Test the CameraResolution equality\n self.assertEqual(Camera.K_VGA, Camera.K_VGA)\n self.assertNotEqual(Camera.K_QVGA, Camera.K_QQVGA)\n\n # Testing that the retrieved camera frames correspond to the required\n # image resolution\n for resolution in [Camera.K_VGA, Camera.K_QVGA, Camera.K_QQVGA]:\n for camera_id in CameraTest.robot.camera_dict.keys():\n CameraTest.robot.subscribeCamera(\n camera_id,\n resolution=resolution)\n\n self.assertEqual(\n CameraTest.robot.getCameraFrame().shape[1],\n resolution.width)\n self.assertEqual(\n CameraTest.robot.getCameraFrame().shape[0],\n resolution.height)\n\n self.assertEqual(\n resolution,\n CameraTest.robot.getCameraResolution())\n\n CameraTest.robot.unsubscribeCamera(camera_id)", "def initCamera() :\n\n # initialize a dictonary for camera configuration vars\n configSet = {}\n\n f = open(\"/home/pi/dragon-timelapse/cam.config\", \"r\")\n l = f.readline()\n while l:\n l = l.strip( \" \\t\\n\" )\n data = l.split( \":\", 1)\n\n configSet[ data[0] ] = data[1]\n l = f.readline()\n f.close\n\n # set some of the things we can definately lock down.\n #camera.iso = int( configSet['iso'] )\n camera.resolution = ( int( configSet['resX']), int( configSet['resY']) )\n camera.vflip = configSet['vflip']\n camera.hflip = configSet['hflip']\n #camera.sharpness = int( configSet['sharpness'] )\n #camera.drc_strength = configSet['drc_strength']\n camera.crop = (0.0, 0.0, 1.0, 1.0)\n\n # open up the camera shutter and let the more dynamic settings\n # settle into place for the current conditions before locking them.\n camera.start_preview()\n time.sleep(2)\n camera.shutter_speed = camera.exposure_speed\n camera.exposure_mode = 'off'\n g = camera.awb_gains\n camera.awb_mode = 'off'\n camera.awb_gains = g\n camera.stop_preview()\n\n return camera.shutter_speed", "def publish(self):\n responses = 
self._airsim_client.simGetImages([\n # uncompressed RGB array bytes\n ImageRequest(self._camera_name, ImageType.Scene, compress=False),\n # # infrared uncompressed image\n # ImageRequest(self._camera_name, ImageType.Infrared, compress=False),\n # # floating point uncompressed image\n # ImageRequest(self._camera_name, ImageType.DepthPlanner, pixels_as_float=True, compress=False),\n ], self._vehicle_name)\n color_response = responses[0]\n # ir_response = responses[1]\n # depth_response = responses[2]\n\n header = Header()\n header.stamp = self.get_clock().now().to_msg()\n # TODO: implement parameter for frame id, also decide on if each separate image type should have a different frame id\n # This may mean we should load the ids via ros parameters\n header.frame_id = self._camera_frame_id\n\n # Handle cam info it has not been found yet\n if self._vehicle_name not in self._cam_info_msgs.keys():\n self._cam_info_msgs[self._vehicle_name] = {}\n cam_info = self._airsim_client.simGetCameraInfo(self._camera_name, self._vehicle_name)\n d_params = self._airsim_client.simGetDistortionParams(self._camera_name, self._vehicle_name)\n self.get_logger().info(f\"{d_params}\")\n self.get_logger().info(f\"\"\"\n HFOV: {cam_info.fov},\n PROJ: {cam_info.proj_mat}\n \"\"\")\n # TODO: implement multiple cameras for each lens on realsense and update this method\n self._cam_info_msgs[self._vehicle_name][\"color\"] = construct_info(header, cam_info, color_response.height, color_response.width)\n # self._cam_info_msgs[self._vehicle_name][\"ir\"] = self._cam_info_msgs[self._vehicle_name][\"color\"]\n\n image_color = construct_image(header, color_response, \"bgr8\")\n # image_ir = construct_image(header, ir_response, \"rgb8\")\n # image_depth = construct_image(header, depth_response, \"rgb8\")\n\n # TODO: use camera pose from airsim\n tfmsg = TransformStamped()\n translation = Vector3(x=0., y=0., z=0.)\n tfmsg.transform.translation = translation\n tfmsg.transform.rotation = Quaternion(x=0., y=0., z=0., w=1.)\n tfmsg.child_frame_id = self._camera_frame_id\n tf_header = Header()\n tf_header.stamp = header.stamp\n tfmsg.header = tf_header\n tfmsg.header.frame_id = \"world\"\n self.br.sendTransform(tfmsg)\n\n self._pub_color.publish(image_color)\n # self._pub_ir.publish(image_ir)\n # self._pub_depth.publish(image_depth)\n self._pub_info_color.publish(self._cam_info_msgs[self._vehicle_name][\"color\"])\n # self._pub_info_ir.publish(self._cam_info_msgs[self._vehicle_name][\"ir\"])", "def camera_setup_6():\n K = np.array([[1790.634474, 0., 973.099292],\n [0., 1785.950534, 803.294457],\n [0., 0., 1. ]])\n \n Rt = np.array([[ -2.1022535018250471e-01, -9.2112145235168197e-02, 9.7330398891652492e-01, -1.4076865278184414e-02],\n [ -9.7735897207277012e-01, -4.6117027185500481e-03, -2.1153763709301088e-01, -3.1732881069183350e-01],\n [ 2.3973774202277975e-02, -9.9573795995643932e-01, -8.9057134763516621e-02, -7.2184838354587555e-02],\n [ 0., 0., 0., 1. 
]])\n R = Rt[0:3, 0:3].T\n t = -np.matmul(R, Rt[0:3, 3:4])\n\n imSize = [1920, 1440]\n dist = np.array([-0.191070, 0.100324, 0.004250, -0.003317, 0.000000])\n cam = Camera(K, R, t, imSize=imSize, id=6, dist=dist)\n return cam", "def process_camera():\n\n pic_array = take_picture()\n detections, shapes, descriptors = detect_faces(person_database,pic_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def update_camera_params(self):\n\n scale_width = float(self._res_w) / self.original_camera_info.width\n scale_height = float(self._res_h) / self.original_camera_info.height\n\n scale_matrix = np.ones(9)\n scale_matrix[0] *= scale_width\n scale_matrix[2] *= scale_width\n scale_matrix[4] *= scale_height\n scale_matrix[5] *= scale_height\n\n # Adjust the camera matrix resolution\n self.current_camera_info.height = self._res_h\n self.current_camera_info.width = self._res_w\n\n # Adjust the K matrix\n self.current_camera_info.K = np.array(self.original_camera_info.K) * scale_matrix\n\n # Adjust the P matrix (done by Rohit)\n scale_matrix = np.ones(12)\n scale_matrix[0] *= scale_width\n scale_matrix[2] *= scale_width\n scale_matrix[5] *= scale_height\n scale_matrix[6] *= scale_height\n self.current_camera_info.P = np.array(self.original_camera_info.P) * scale_matrix", "def __update_video_image_camera(self):\n self.background_camera = self.background_renderer.GetActiveCamera()\n\n origin = (0, 0, 0)\n spacing = (1, 1, 1)\n\n # Works out the number of millimetres to the centre of the image.\n x_c = origin[0] + 0.5 * (self.image_extent[0] +\n self.image_extent[1]) * spacing[0]\n y_c = origin[1] + 0.5 * (self.image_extent[2] +\n self.image_extent[3]) * spacing[1]\n\n # Works out the total size of the image in millimetres.\n i_w = (self.image_extent[1] - self.image_extent[0] + 1) * spacing[0]\n i_h = (self.image_extent[3] - self.image_extent[2] + 1) * spacing[1]\n\n # Works out the ratio of required size to actual size.\n w_r = i_w / self.width()\n h_r = i_h / self.height()\n\n # Then you adjust scale differently depending on whether the\n # screen is predominantly wider than your image, or taller.\n if w_r > h_r:\n scale = 0.5 * i_w * (self.height() / self.width())\n else:\n scale = 0.5 * i_h\n\n self.background_camera.SetFocalPoint(x_c, y_c, 0.0)\n self.background_camera.SetPosition(x_c, y_c, -1000)\n self.background_camera.SetViewUp(0.0, -1.0, 0.0)\n self.background_camera.SetClippingRange(990, 1010)\n self.background_camera.SetParallelProjection(True)\n self.background_camera.SetParallelScale(scale)", "def select_camera_image(row):\n cameras = ['left', 'center', 'right']\n steering = float(row['steering'])\n # use one of the cameras randomily\n camera = random.choice(cameras)\n steering += steering_adj[camera]\n image = plt.imread(folder+row[camera].strip())\n return image, steering", "def get_camera_from_cameras(\n camera_id: int, data: dict[str, Any] | None\n) -> dict[str, Any] | None:\n for camera in data.get(KEY_CAMERAS, []) if data else []:\n if camera.get(KEY_ID) == camera_id:\n val: dict[str, Any] = camera\n return val\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checkout code for CESM. If sandbox exists, check that the right tag has been checked out. Otherwise, download the code, checkout the tag and run manage_externals. The scripts don't seem to like multiple applications of manage_externals.
def code_checkout(cesm_repo, coderoot, tag): sandbox = os.path.split(coderoot)[-1] if os.path.exists(coderoot): print('Check for right tag: '+coderoot) p = Popen('git status', shell=True, cwd=coderoot, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() stdout = stdout.decode('UTF-8') stderr = stderr.decode('UTF-8') print(stdout) print(stderr) if tag not in stdout.split('\n')[0]: raise ValueError('tag does not match') else: stat = check_call(['mkdir', '-p', coderoot]) if stat != 0: sys.exit(1) # clone the repo p = Popen('git clone '+cesm_repo+' '+sandbox, shell=True, cwd=coderoot+'/..', stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if stdout: print(stdout) if stderr: print(stderr) if p.returncode != 0: raise Exception('git error') # check out the right tag p = Popen('git checkout %s'%tag, shell=True, cwd=coderoot) stdout, stderr = p.communicate() if stdout: print(stdout) if stderr: print(stderr) if p.returncode != 0: raise Exception('git error') # check out externals p = Popen('./manage_externals/checkout_externals -v', shell=True, cwd=coderoot) stdout, stderr = p.communicate() if stdout: print(stdout) if stderr: print(stderr) if p.returncode != 0: raise Exception('git error')
[ "def checked_out_MPS():\n\n checked_out_packages = os.path.join(os.environ[\"CMSSW_BASE\"], \"src\", \".git\",\n \"info\", \"sparse-checkout\")\n checked_out = False\n git_initialized = False\n try:\n with open(checked_out_packages, \"r\") as f:\n packages = (\"/Alignment/\", \"/Alignment/MillePedeAlignmentAlgorithm/\",\"/*/\")\n for line in f:\n if line.strip() in packages:\n checked_out = True\n break\n git_initialized = True # since the sparse checkout file is there\n except IOError as e:\n if e.args != (2, 'No such file or directory'): raise\n\n return checked_out, git_initialized", "def checkout_qmk():\n if exists('qmk_firmware'):\n rmtree('qmk_firmware')\n\n if not fetch_source(repo_name(QMK_GIT_URL)):\n git_clone(QMK_GIT_URL, QMK_GIT_BRANCH)", "def code_verify(revision=None):\r\n if is_old_code():\r\n fprint('installed code is in the old style (directory instead of symlink). Manual intervention required')\r\n return False\r\n rev = revision or hg_revision()\r\n if exists('~/viewfinder.%s' % rev):\r\n fprint('Code at revision %s is installed' % rev)\r\n return True\r\n else:\r\n fprint('Code at revision %s is not installed' % rev)\r\n return False", "def lifecycle_approve_for_my_org(self, orderer_url, orderer_tls_rootcert, channel_name, cc_name,\n chaincode_version, policy, sequence=1):\n res, installed = self.lifecycle_query_installed(\"3s\")\n cc_label = cc_name+\"_\"+chaincode_version\n package_id = \"\"\n for each in installed['installed_chaincodes']:\n if each['label'] == cc_label:\n package_id = each['package_id']\n break\n if package_id == \"\":\n return 1, \"not exist the chaincode, please check chaincode_name and chaincode_version\"\n\n if os.getenv(\"CORE_PEER_TLS_ENABLED\") == \"false\" or os.getenv(\"CORE_PEER_TLS_ENABLED\") is None:\n if self.version in BasicEnv.binary_versions_v2:\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} \"\n \" --channelID {} --name {} --version {} --init-required --package-id {} --sequence {}\"\n \" --signature-policy {} > ./approve.txt\"\n .format(self.version, orderer_url, channel_name, cc_name,\n chaincode_version, package_id, sequence, policy))\n else:\n if self.version in BasicEnv.binary_versions_v2:\n res = subprocess.Popen(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} --tls \"\n \"--cafile {} --channelID {} --name {} --version {} --init-required --package-id \"\n \"{} --sequence {} --signature-policy {}\"\n .format(self.version, orderer_url, orderer_tls_rootcert, channel_name,\n cc_name, chaincode_version, package_id, sequence, policy), shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = res.communicate()\n return_code = res.returncode\n\n if return_code == 0:\n content = str(stdout, encoding=\"utf-8\")\n else:\n stderr = str(stderr, encoding=\"utf-8\")\n return return_code, stderr\n return return_code, content", "def test_checkout_repository(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.contribtool.checkout_repository(TOOLNAME,username,userpass)", "def sync_code_to_masters(\n cluster: Cluster,\n dcos_checkout_dir: Path,\n sudo: bool,\n) -> None:\n local_packages = dcos_checkout_dir / 'packages'\n local_test_dir = local_packages / 'dcos-integration-test' / 'extra'\n if not Path(local_test_dir).exists():\n message = (\n 'DCOS_CHECKOUT_DIR must be set to the checkout of a DC/OS '\n 'repository.\\n'\n '\"{local_test_dir}\" does not exist.'\n ).format(local_test_dir=local_test_dir)\n raise 
click.BadArgumentUsage(message=message)\n\n dcos_checkout_dir_variant = _dcos_checkout_dir_variant(\n dcos_checkout_dir=dcos_checkout_dir,\n )\n\n node_test_dir = Path('/opt/mesosphere/active/dcos-integration-test')\n\n test_tarstream = _tar_with_filter(\n path=local_test_dir,\n tar_filter=_cache_filter,\n )\n\n dcos_variant = get_cluster_variant(cluster=cluster)\n if dcos_variant is None:\n message = (\n 'The DC/OS variant cannot yet be determined. '\n 'Therefore, code cannot be synced to the cluster.'\n )\n click.echo(message, err=True)\n sys.exit(1)\n\n syncing_oss_to_ee = bool(\n dcos_variant == DCOSVariant.ENTERPRISE\n and dcos_checkout_dir_variant == DCOSVariant.OSS,\n )\n\n node_active_dir = Path('/opt/mesosphere/active')\n node_test_dir = node_active_dir / 'dcos-integration-test'\n\n if syncing_oss_to_ee:\n # This matches part of\n # https://github.com/mesosphere/dcos-enterprise/blob/master/packages/dcos-integration-test/ee.build\n for master in cluster.masters:\n master.run(args=['rm', '-rf', str(node_test_dir / 'util')])\n\n # This makes an assumption that all tests are at the top level.\n master.run(\n args=[\n 'rm',\n '-rf',\n str(node_test_dir / 'open_source_tests' / '*.py'),\n ],\n # We use a wildcard character, `*`, so we need shell expansion.\n shell=True,\n sudo=sudo,\n )\n\n master.run(\n args=[\n 'mkdir',\n '--parents',\n str(node_test_dir / 'open_source_tests'),\n ],\n sudo=sudo,\n )\n\n _send_tarstream_to_node_and_extract(\n tarstream=test_tarstream,\n node=master,\n remote_path=node_test_dir / 'open_source_tests',\n sudo=sudo,\n )\n master.run(\n args=[\n 'rm',\n '-rf',\n str(node_test_dir / 'open_source_tests' / 'conftest.py'),\n ],\n sudo=sudo,\n )\n master.run(\n args=[\n 'mv',\n str(node_test_dir / 'open_source_tests' / 'util'),\n str(node_test_dir),\n ],\n sudo=sudo,\n )\n else:\n _sync_bootstrap_to_masters(\n cluster=cluster,\n dcos_checkout_dir=dcos_checkout_dir,\n sudo=sudo,\n )\n\n for master in cluster.masters:\n # This makes an assumption that all tests are at the top level.\n master.run(\n args=['rm', '-rf', str(node_test_dir / '*.py')],\n # We use a wildcard character, `*`, so we need shell expansion.\n shell=True,\n sudo=sudo,\n )\n _send_tarstream_to_node_and_extract(\n tarstream=test_tarstream,\n node=master,\n remote_path=node_test_dir,\n sudo=sudo,\n )", "def compile_code(self,toolname,adminuser,adminpass):\n\n # ssh into a tool session container as the tools manager\n # compile and install the code\n\n # get into a tool session container.\n cm = ContainerManager()\n ws = cm.access(host=self.hubname,username=adminuser,password=adminpass)\n\n session_number,es = ws.execute('echo $SESSION')\n\n # catch errors that happen in the shell\n # so we can properly exit and close the workspace\n try:\n # become the apps user\n ws.send('sudo su - apps')\n ws.start_bash_shell()\n output,es = ws.execute('whoami')\n exit_apps = True\n if output != 'apps':\n exit_apps = False\n msg = \"doesn't look like we were able to become the apps user\"\n self.logger.error(msg)\n raise Exception(msg)\n\n # catch compile and install errors\n # so we can report them back to the developer\n\n # navigate to the tool directory\n cmd = 'cd /apps/%(toolname)s/dev/src' \\\n % { 'toolname' : toolname, }\n ws.execute(cmd)\n\n # if there is a makefile available\n # run:\n # make clean\n # make all\n # make install\n # don't fail if there is no clean or all targets\n if ws.bash_test('-e Makefile'):\n # allow 30 minutes for the code to compile\n ws.timeout = 1800\n output,es = 
ws.execute('make clean',False)\n output,es = ws.execute('make all',False)\n no_make_all_text = \"make: *** No rule to make target `all'. Stop.\"\n if es > 0:\n if es == 2 and output == no_make_all_text:\n output,es = ws.execute('make')\n else:\n self.logger.exception(output)\n raise ExitCodeError(output)\n output,es = ws.execute('make install')\n ws.timeout = 10\n else:\n msg = \"No Makefile found\"\n print msg\n self.logger.info(msg)\n\n finally:\n # exit sudo\n ws.stop_bash_shell()\n if exit_apps:\n ws.send('exit')\n\n # shut down the ssh connection\n ws.close()", "def copy_to_github(self, reset=False):\n self.build(reset=reset)\n\n if j.core.platformtype.myplatform.isUbuntu:\n CODE_SB_BIN = j.clients.git.getContentPathFromURLorPath(\"git@github.com:threefoldtech/sandbox_ubuntu.git\")\n elif j.core.platformtype.myplatform.platform_is_osx:\n CODE_SB_BIN = j.clients.git.getContentPathFromURLorPath(\"git@github.com:threefoldtech/sandbox_osx.git\")\n else:\n raise RuntimeError(\"only ubuntu & osx support\")\n\n CODE_SB_BASE = j.clients.git.getContentPathFromURLorPath(\"git@github.com:threefoldtech/sandbox_base.git\")\n\n C = \"\"\"\n set -ex\n\n cp {SRCBINDIR}/resty* {CODE_SB_BASE}/base/bin/\n rm -f {CODE_SB_BIN}/base/bin/resty*\n\n cp {SRCBINDIR}/openresty {CODE_SB_BASE}/base/bin/\n rm -f {CODE_SB_BIN}/base/bin/openresty\n\n cp {DIR_BIN}/*.lua {CODE_SB_BASE}/base/bin/\n rm -f {CODE_SB_BIN}/base/bin/*.lua\n\n cp {DIR_BIN}/lapis {CODE_SB_BASE}/base/bin/\n rm -f {CODE_SB_BIN}/base/bin/lapis\n\n cp {DIR_BIN}/lua {CODE_SB_BIN}/base/bin/\n rm -f {CODE_SB_BASE}/base/bin/lua\n\n cp {DIR_BIN}/moon* {CODE_SB_BASE}/base/bin/\n rm -f {CODE_SB_BIN}/base/bin/moon*\n\n cp {DIR_BIN}/openresty {CODE_SB_BIN}/base/bin/\n rm -f {CODE_SB_BASE}/base/bin/openresty\n\n \"\"\"\n args = {}\n args[\"CODE_SB_BIN\"] = CODE_SB_BIN\n args[\"CODE_SB_BASE\"] = CODE_SB_BASE\n args[\"SRCBINDIR\"] = j.core.tools.text_replace(\"{DIR_BASE}/openresty/bin\")\n args[\"BINDIR\"] = j.core.tools.text_replace(\"{DIR_BASE}/bin\")\n\n self.tools.execute(C, args=args)", "def __getFromJEMpage(self):\n\n if not self.__download(self.repo, self.version, self.lib_tar, self.dest_dir): return False\n if not self.__extract(self.lib_tar): return False\n\n if not self.__download(self.repo, self.version, self.launcher_tar, self.dest_dir): return False\n if not self.__extract(self.launcher_tar): return False\n\n self.logger.info(\"successfully downloaded and extracted JEM ver %s from repo %s\" % (self.version, self.repo))\n\n if os.path.exists(self.dest_dir + \"/JEM.py\"):\n os.environ[\"JEM_PACKAGEPATH\"] = self.dest_dir\n\n\n return True", "def test_pristine_tar_checkout():", "def test_pristine_tar_checkout_nonexistent():", "def checkout(self, checkout, *args):\n return self.cmd('checkout', checkout, *args)", "def chaincode_install(self):\n\n orgs = self.orgs\n for org in orgs:\n org_admin = self.client.get_user(org, \"Admin\")\n response = self.client.chaincode_install(\n requestor = org_admin,\n peer_names = ['peer0.' + org],\n cc_path = CC_PATH,\n cc_name = CC_NAME,\n cc_version = CC_VERSION\n )\n self.assertTrue(response)\n # Verify the cc pack exists now in the peer node\n dc = docker.from_env()\n for peer in ['peer0']:\n peer0_container = dc.containers.get(peer + '.' 
+ org)\n code, output = peer0_container.exec_run(\n 'test -f '\n '/var/hyperledger/production/chaincodes/example_cc.1.0')\n self.assertEqual(code, 0, \"chaincodes pack not exists\")\n\n logger.info(\"E2E: chaincode install done\")", "def checkout_previous():\n\n if env.have_schema_been_modified:\n with settings(hide('warnings', 'stdout'), warn_only=True):\n with cd(env.path):\n \n print(\"\")\n print(blue(\"Checkout de l'ancienne version...\", bold=True))\n \n local(\"%s checkout %s\" % (env.path_git,\n env.tag))\n with settings(hide('stdout')):\n \n _buildAllClasses()", "def checkout(stage):\n try:\n config, deploymentStage = config_handler.get_config(\n os.getcwd(),\n stage\n )\n print('Checking out to '+stage+' stage')\n shutil.copyfile('uxy.json', '.tmp/uxy.json')\n config['app:stage'] = stage\n configJsonFile = open('uxy.json','w')\n configJsonFile.write(json.dumps(config, indent=2))\n configJsonFile.close()\n except Exception as e:\n print('==> Checkout cancelled.')\n return\n\n print('==> Switched to '+stage+' stage')", "def test_link_to_checkout(self):\n self.browser.find_element_by_link_text('Checkout').click()\n self.assertEqual(self.browser.current_url,\n self.live_server_url + self.CHECKOUT_URL)", "def update_openblock():\n\n tf = tempfile.mktemp(suffix='-openblock')\n local('git clone git://github.com/openplans/openblock.git {0}'.format(tf))\n dest = os.path.join(PROJECT_ROOT, 'requirements', 'sdists')\n for name in ('obadmin', 'ebdata', 'ebpub'):\n package = os.path.join(tf, name)\n os.chdir(package)\n local('pip install -e {source} -d {dest}'.format(source=package,\n dest=dest))\n shutil.rmtree(tf)", "def test_ML_check_cms_aem_emvevex(self):\n\n self.setup_logFile_for_logger('madgraph.check_cmd')\n files = ['acceptance_test_aem_emvevex.pkl',\n 'acceptance_test_aem_emvevex.log',\n 'acceptance_test_aem_emvevex_widths_increased.pkl',\n 'acceptance_test_aem_emvevex_widths_increased.log']\n output_name = 'SAVEDTMP_CHECK_acceptance_test_aem_emvevex__%s__'\n \n try:\n cwd = os.getcwd()\n \n # Change this when we will make the CMS-ready EW model the default\n self.do('import model loop_qcd_qed_sm')\n for mode in ['NWA','CMS']:\n if path.isdir(pjoin(MG5DIR,output_name%mode)):\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n \n # Make sure it works for an initial run\n command = 'check cms -reuse a e- > e- ve ve~ [virt=QCD QED] '\n options = {'name':'acceptance_test_aem_emvevex',\n 'lambdaCMS':'(1.0e-6,2)',\n 'show_plot':'False',\n 'seed':'666',\n 'resonances':'2',\n 'recompute_width':'first_time',\n 'report':'full'}\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running first CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isdir(pjoin(MG5DIR,output_name%mode)))\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.log')))\n res = open(pjoin(MG5DIR,'acceptance_test_aem_emvevex.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n \n # Now for a Reuse-run with the widths modified by 1%\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n 
self.setup_logFile_for_logger('madgraph.check_cmd')\n # Now copy the card with recomputed widths in it\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isfile(pjoin(MG5DIR,output_name%mode,\n 'Cards','param_card.dat_recomputed_widths')))\n shutil.copy(pjoin(MG5DIR,output_name%mode,'Cards',\n 'param_card.dat_recomputed_widths'),\n pjoin(MG5DIR,output_name%mode,'Cards','param_card.dat'))\n options['tweak']='allwidths->1.1*allwidths(widths_increased)'\n options['recompute_width']='never'\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running second CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')))\n res = open(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n \n # Clean up duties\n for mode in ['NWA','CMS']:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n for file in files:\n try:\n os.remove(pjoin(MG5DIR,file))\n except:\n pass\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n\n except KeyError as e:\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n for mode in ['NWA','CMS']:\n try:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n except:\n pass\n for f in files:\n try:\n os.remove(pjoin(MG5DIR,f))\n except:\n pass\n raise e\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)", "def check_workspace ():\n\n try:\n ex (\"cd $DOC_ROOT/ACE_TAO && git pull -p\")\n print (\"Successfully updated ACE/TAO working copy\")\n except:\n print (\"Unable to update ACE/TAO workspace at \" + doc_root)\n raise\n\n try:\n ex (\"cd $DOC_ROOT/MPC && git pull -p\")\n print (\"Successfully updated MPC working copy to revision \")\n except:\n print (\"Unable to update the MPC workspace at \" + doc_root + \"/ACE/MPC\")\n raise\n\n vprint (\"Repos root URL = \" + opts.repo_root + \"\\n\")\n vprint (\"Repos MPC root URL = \" + opts.mpc_root + \"\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test adding basic Deterministic InnerNode.
def test_addInner(self): print("\nTest 1: Adding InnerNode") try: builder = StaticBuilder() builder.addInput(10, name="In") enc_name = builder.addInner(3, name="In") except AttributeError: print("\nCAUGHT! Trying to assign the same name to two nodes! " "AttributeError exception\n") builder = StaticBuilder() builder.addInput(10, name="In") enc_name = builder.addInner(3, name="Det") enc1 = builder.nodes[enc_name] print('\nNode keys in builder:', list(builder.nodes.keys())) print("This node's key:", enc_name) self.assertEqual(enc1.label, 1, "The label has not been assigned correctly") self.assertEqual(builder.num_nodes, 2, "The number of nodes has not been " "assigned correctly") self.assertEqual(enc1.num_declared_outputs, 0, "The number of outputs of the " "DeterministicNode has not been assigned correctly") self.assertEqual(enc1.num_declared_inputs, 0, "The number of inputs of the " "DeterministicNode has not been assigned correctly")
[ "def test_add_node(self):\n # will call add_edges, so no extensive test are necessary\n node = Node(data={\"id\": \"0\"})\n expected_nodes = [\n Node(data={\"id\": \"0\"}, position={}),\n ]\n expected_edges = []\n\n graph = Graph()\n graph.add_node(node)\n compare_edges(expected_edges, graph.edges)\n compare_nodes(expected_nodes, graph.nodes)", "def test_add_root(self):\n pass", "def test_node_creation():\n a_node = Node(4)\n assert a_node", "def test_add_children(self):\n parent, child = (graph.Node(id) for id in (\"parent\", \"child\"))\n parent.children.add(child)\n self.assertTrue(child in parent.children)", "def test_Tree():", "def testAddNodes1(self):\n self._testAddNodes(1)", "def test_add_left_and_right_child():\n tree = BinaryTree()\n tree.root = TNode(1)\n tree.root.left = TNode(2)\n tree.root.right = TNode(3)\n assert tree.root.left.value == 2\n assert tree.root.right.value == 3", "def testAddNodes0(self):\n self._testAddNodes(0)", "def test_add_to_empty_binary_tree(self):\n binary_tree = BinaryTree()\n for val in range(1, 5):\n binary_tree.add(Node(val))\n self.assertEqual(binary_tree.root.value, 1)\n self.assertEqual(binary_tree.root.right.value, 2)\n self.assertEqual(binary_tree.root.right.right.value, 3)\n self.assertEqual(binary_tree.root.right.right.right.value, 4)", "def test_add_node(self):\n self.hierarchy.add_node('FrequencyCoordinator')\n self.assertNotIn('FrequencyCoordinator', self.hierarchy.custom_nodes)\n self.assertIn('FrequencyCoordinator', self.hierarchy.root['channels'][0])\n thread = self.hierarchy.root['channels'][0]['FrequencyCoordinator']['thread']\n self.assertIn(thread, self.hierarchy.root['channels'][0]['root']['peer_list'])\n self.hierarchy.remove_node('FrequencyCoordinator') # Cleanup.", "def test_add_network(self):\n pass", "def test_new_node_val():\n a_node = Node(67)\n assert a_node.value == 67", "def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node", "def test_add_element(self): # add\n tree = BinarySearchTree()\n tree.add(59)\n self.assertEqual(tree.find(59), True)", "def test_add_node(self):\n nodes_to_add = {\"nodes\": [\n {\n \"address\": \"10.2.2.3\",\n \"port\": 80,\n \"condition\": \"ENABLED\",\n \"type\": \"PRIMARY\"\n },\n {\n \"address\": \"10.2.2.4\",\n \"port\": 81,\n \"condition\": \"ENABLED\",\n \"type\": \"SECONDARY\"\n }]}\n\n main_treq_args = ['post', 'clburl/loadbalancers/12345/nodes',\n ((json.dumps(nodes_to_add),), self.expected_kwargs)]\n\n def add(clb, clock):\n return clb.add_nodes(self.rcs, nodes_to_add[\"nodes\"], clock=clock)\n\n self.assert_mutate_function_retries_until_success(\n add, main_treq_args, (Response(202), json.dumps(nodes_to_add)),\n nodes_to_add)\n\n self.assert_mutate_function_retries_until_timeout(\n add, main_treq_args, 60)\n\n self.assert_mutate_function_does_not_retry_if_not_pending_update(\n add, main_treq_args)", "def testAddNodes3(self):\n self._testAddNodes(3)", "def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def test_node_init():\n from dll import Node\n new_node = Node(0, None, None)\n assert new_node.contents == 0\n assert new_node.next_node is None\n assert new_node.previous_node is None", "def test_add_child_to_collection(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test adding basic OutputNode
def test_addOutput(self): print("\nTest 2: Adding OutputNode") builder = StaticBuilder() builder.addInput(10, name="In") builder.addInner(3, name="Det") o_name = builder.addOutput(name="Out") o1 = builder.nodes[o_name] print("\nNode keys in builder:", list(builder.nodes.keys())) print("This node's key:", o_name) self.assertEqual(o1.label, 2, "The label has not been assigned correctly") self.assertEqual(builder.num_nodes, 3, "The number of nodes has not been " "assigned correctly") self.assertEqual(o1.num_declared_outputs, 0, "The number of outputs of the " "OutputNode has not been assigned correctly") self.assertEqual(o1.num_declared_inputs, 0, "The number of inputs of the " "OutputNode has not been assigned correctly")
[ "def test_get_node_outputs(self):\n pass", "def add_output(self, output):\n self._outputs.append(output)", "def add_output_nodes_argument(self, help, required = True):\n\t\tself.parser.add_argument(\n\t\t\t\"--output-nodes\",\n\t\t\tnargs = \"+\",\n\t\t\ttype = str,\n\t\t\trequired = required,\n\t\t\thelp = help\n\t\t)", "def add_output_test(name, test_fn):\n _output_tests[name] = test_fn", "def testNewOutputModule(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n output_module = manager.OutputManager.NewOutputModule('test_output')\n self.assertIsInstance(output_module, TestOutput)\n\n with self.assertRaises(ValueError):\n manager.OutputManager.NewOutputModule(1)\n\n with self.assertRaises(KeyError):\n manager.OutputManager.NewOutputModule('bogus')\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def testAddNodes0(self):\n self._testAddNodes(0)", "def declare_outputs(self):", "def create_output_node() -> bpy.types.ShaderNode:\n output_node = material.node_tree.nodes.new('ShaderNodeOutputMaterial')\n\n log(\"New output node is {}\".format(output_node))\n return output_node", "def write_output(self):", "def writeOutput(self, output):", "def test_expected_output(self):\n output = Resource._workflow_node_structure(EXAMPLE_NODE_LIST)\n self.assertEqual(\n output,\n [\n {\n 'job_template': 48,\n 'always_nodes': [\n {\n 'project': 98\n }\n ]\n }\n ]\n )", "def _generate_output(self):\n raise NotImplementedError()", "def testHasOutputClass(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n self.assertTrue(manager.OutputManager.HasOutputClass('test_output'))\n self.assertFalse(manager.OutputManager.HasOutputClass('bogus'))\n self.assertFalse(manager.OutputManager.HasOutputClass(1))\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def test_output_instruction(self):\n LEXER.input('print(\\'Hello World\\')')\n self.checks_tokens(['OUTPUT', 'LPAREN', 'STRING', 'RPAREN'])", "def test_node_write_to_output_buffer(graph):\n a = sf.Node()\n graph.render_subgraph(a)\n assert a.output_buffer[0][3] == 0.0\n a.output_buffer[0][3] = 1.0\n assert a.output_buffer[0][3] == 1.0\n\n #--------------------------------------------------------------------------------\n # Why is the output buffer of length 256 (SIGNALFLOW_DEFAULT_BLOCK_SIZE)\n # rather than 2048 (SIGNALFLOW_NODE_BUFFER_SIZE)? 
Because the output buffer's\n # length is reported by the Python bindings as `last_num_frames`.\n # Whether this is a good idea is open to debate.\n #\n # Better would be to have a precise and rigorous block size throughout, which\n # would mean adding a block buffer between the audio I/O and the Graph.\n #--------------------------------------------------------------------------------\n assert a.output_buffer.shape == (32, 256)\n a.output_buffer[31][255] = 1.0\n assert a.output_buffer[31][255] == 1.0\n with pytest.raises(IndexError):\n a.output_buffer[32][255] == 1.0\n with pytest.raises(IndexError):\n a.output_buffer[31][256] == 1.0", "def test_create_named_output_edge(self):\n n1, n2 = Node('a'), Node('b')\n result = n1 * 'foo' | n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, 'foo')])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2, 'foo')])\n self.assertEqual(n2.eout, [])", "def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node", "def add_output(self, output, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n out_element = ET.SubElement(cell, 'output')\n out_element.text = output", "def testAddNodes1(self):\n self._testAddNodes(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test building a model with 2 outputs. Test Cloning an output
def test_BuildModel1(self): print("\nTest 5: Building a Model with cloning") builder = StaticBuilder("Clone") in1 = builder.addInput(10) enc1 = builder.addInner(3) out1 = builder.addOutput(name="Out1") out2 = builder.addOutput(name="Out2") builder.addDirectedLink(in1, enc1) builder.addDirectedLink(enc1, out1) builder.addDirectedLink(enc1, out2) builder.build()
[ "def test_custom_model_n_outputs():\n\n @custom_model\n def model(x, y, n_outputs=2):\n return x + 1, y + 1\n\n m = model()\n assert not isinstance(m.n_outputs, Parameter)\n assert isinstance(m.n_outputs, int)\n assert m.n_outputs == 2\n assert m.outputs == (\"x0\", \"x1\")\n assert (\n separability_matrix(m)\n == [\n [True, True],\n [True, True],\n ]\n ).all()\n\n @custom_model\n def model(x, y, z, n_outputs=3):\n return x + 1, y + 1, z + 1\n\n m = model()\n assert not isinstance(m.n_outputs, Parameter)\n assert isinstance(m.n_outputs, int)\n assert m.n_outputs == 3\n assert m.outputs == (\"x0\", \"x1\", \"x2\")\n assert (\n separability_matrix(m)\n == [\n [True, True, True],\n [True, True, True],\n [True, True, True],\n ]\n ).all()", "def test_prepare_outputs_mixed_broadcast():\n\n model = models.Gaussian2D(1, 2, 3, 4, 5)\n\n output = model([1, 2], 3)\n assert output.shape == (2,)\n np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])\n\n output = model(4, [5, 6])\n assert output.shape == (2,)\n np.testing.assert_allclose(output, [0.8146473164114145, 0.7371233743916278])", "def test_BuildModel2(self):\n print(\"\\nTest 6: Building a Model with Concat\")\n builder = StaticBuilder(\"Concat\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3, num_islots=2)\n out1 = builder.addOutput()\n\n builder.addDirectedLink(in1, enc1, islot=0)\n builder.addDirectedLink(in2, enc1, islot=1)\n builder.addDirectedLink(enc1, out1)\n \n builder.build()", "def test_simple_merge(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(6)(x1)\n x4 = merge([x2, x3], mode=\"concat\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test_clone_scenario(self):\n pass", "def _test_output_shapes(model):\n assert model.r == r\n assert model.m == m\n assert model.c_.shape == (r,)\n assert model.A_.shape == (r,r)\n assert model.Hc_.shape == (r,r*(r+1)//2)\n assert model.H_.shape == (r,r**2)\n assert model.Gc_.shape == (r,r*(r+1)*(r+2)//6)\n assert model.G_.shape == (r,r**3)\n assert model.B_.shape == (r,m)\n assert hasattr(model, \"datacond_\")\n assert hasattr(model, \"dataregcond_\")\n assert round(model.dataregcond_, 6) <= round(model.datacond_, 6)\n assert hasattr(model, \"residual_\")\n assert hasattr(model, \"misfit_\")\n assert round(model.misfit_, 6) <= round(model.residual_, 6)", "def test_create(self):\n empty_model = mdl.Model()\n built_model = ex.build_model()", "def _make_model(self, verbose=False):\n if \"Model Outputs\" in self.data_dict:\n self._model_output = modeloutput.ModelOutput(self.data_dict[\"Model Outputs\"], joints = self._joint_objs)\n if verbose:\n print(\"Model Outputs generated\")\n elif verbose:\n print(\"No Model outputs\")", "def test_prepare_outputs_single_entry_vector():\n\n model = models.Gaussian2D(1, 2, 3, 4, 5)\n\n 
output = model(np.array([1]), np.array([2]))\n assert output.shape == (1,)\n np.testing.assert_allclose(output, [0.9500411305585278])", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_prepare_sample_to_forward(self):\n sample = [\n {\"src\": \"ola mundo\", \"ref\": \"hi world\", \"mt\": \"hey world!\", \"score\": 0.8},\n {\"src\": \"ola mundo\", \"ref\": \"hi world\", \"mt\": \"hey world!\", \"score\": 0.8},\n ]\n\n model_input, target = self.estimator.prepare_sample(sample)\n model_output = self.estimator(**model_input)\n self.assertTrue(model_output[\"score\"].shape[0] == 2)\n self.assertTrue(model_output[\"score\"].shape[1] == 1)", "def test_write_model(multicut):\n\n training_structure = get_random_atoms()\n sgp_calc = get_sgp_calc(multiple_cutoff=multicut)\n\n # Predict on training structure.\n training_structure.calc = sgp_calc\n forces = training_structure.get_forces()\n\n # Write the SGP to JSON.\n sgp_name = f\"sgp_calc_{multicut}.json\"\n sgp_calc.write_model(sgp_name)\n\n # Odd Pybind-related issue here that seems to be related to polymorphic\n # kernel pointers. Need to return the kernel list for SGP prediction to\n # work. Possibly related to:\n # https://stackoverflow.com/questions/49633990/polymorphism-and-pybind11\n sgp_calc_2, _ = SGP_Calculator.from_file(sgp_name)\n\n os.remove(sgp_name)\n\n # Compute forces with reconstructed SGP.\n training_structure.calc = sgp_calc_2\n forces_2 = training_structure.get_forces()\n\n # Check that they're the same.\n max_abs_diff = np.max(np.abs(forces - forces_2))\n assert max_abs_diff < 1e-8", "def test_copied_models_are_equal(dbdiskrepo):\n original = fit_model()\n\n shallow = copy(original)\n assert original.artifact.id == shallow.artifact.id\n assert original.artifact.value_id == shallow.artifact.value_id\n assert hash(original) == hash(shallow)\n\n deep = deepcopy(original)\n assert original.artifact.id == deep.artifact.id\n assert original.artifact.value_id == deep.artifact.value_id\n assert hash(original) == hash(deep)", "def test_copying_layout(empty_model):\n assert 1 == 0 # TODO", "def test_simple_model():\n # Train the simple copy task.\n V = 11\n criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)\n model = Transformer.make_model(V, V, N=2, d_model=512, d_ff=1024)\n model_opt = NoamOpt(model.src_embed[0].d_model, 1, 400,\n torch.optim.Adam(model.parameters(),\n lr=0,\n betas=(0.9, 0.98),\n eps=1e-9))\n for epoch in range(15):\n model.train()\n Training.run_epoch(Training.data_gen(V, 30, 20), model,\n SimpleLossCompute(model.generator,\n criterion,\n model_opt))\n model.eval()\n Training.run_epoch(Training.data_gen(V, 30, 5), model,\n SimpleLossCompute(model.generator,\n criterion,\n None))\n\n # Run and decode model copy\n model.eval()\n src = Variable(torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]))\n src_mask = Variable(torch.ones(1, 1, 10))\n output = Transformer.greedy_decode(model,\n src,\n src_mask,\n max_len=10,\n start_symbol=1)\n assert torch.equal(output, torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]))", "def test_custom_model_settable_parameters():\n\n @custom_model\n def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):\n return x + 1, y + 1\n\n m = model()\n assert m.n_outputs == 2\n assert m.bounding_box == ((1, 2), (3, 4))\n m.bounding_box = ((9, 10), (11, 12))\n assert m.bounding_box == ((9, 10), (11, 12))\n m = model(bounding_box=((5, 6), (7, 8)))\n assert m.n_outputs == 2\n assert m.bounding_box == ((5, 6), (7, 8))\n m.bounding_box = ((9, 10), (11, 12))\n 
assert m.bounding_box == ((9, 10), (11, 12))\n\n @custom_model\n def model(x, y, n_outputs=2, outputs=(\"z0\", \"z1\")):\n return x + 1, y + 1\n\n m = model()\n assert m.n_outputs == 2\n assert m.outputs == (\"z0\", \"z1\")\n m.outputs = (\"a0\", \"a1\")\n assert m.outputs == (\"a0\", \"a1\")\n m = model(outputs=(\"w0\", \"w1\"))\n assert m.n_outputs == 2\n assert m.outputs == (\"w0\", \"w1\")\n m.outputs = (\"a0\", \"a1\")\n assert m.outputs == (\"a0\", \"a1\")", "def test_new_model_saves(self): #\n model = pycotools3.model.Model(self.cps_file, new=True)\n model.save()", "def test_construct_model(instance):\n construct_model(instance)", "def test_set_output_implicitly(self):\n self.command.output = \"\"\n self.command.package = self.input_ovf\n self.assertEqual(self.command.output, \"\")\n self.command.run()\n self.assertEqual(self.command.output, self.input_ovf)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a model with 2 inputs. Test ConcatNode
def test_BuildModel2(self): print("\nTest 6: Building a Model with Concat") builder = StaticBuilder("Concat") in1 = builder.addInput(10) in2 = builder.addInput(20) enc1 = builder.addInner(3, num_islots=2) out1 = builder.addOutput() builder.addDirectedLink(in1, enc1, islot=0) builder.addDirectedLink(in2, enc1, islot=1) builder.addDirectedLink(enc1, out1) builder.build()
[ "def concat_model():\n x = tf.keras.Input(shape=[10, 10, 3, ])\n x1 = tf.keras.layers.Conv2D(5, (2, 2))(x)\n x2 = tf.keras.layers.Conv2D(6, (2, 2))(x)\n x3 = tf.keras.layers.Conv2D(7, (2, 2))(x)\n z = tf.keras.layers.concatenate([x2, x1, x3], axis=-1)\n z1 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z2 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z = tf.add(z1, z2)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"concat_model\")(z)\n return output", "def create_helper_concat_node(inputs, output_name, axis=0):\n concat_node = onnx.helper.make_node(\n \"Concat\",\n inputs=inputs,\n outputs=[output_name],\n name=output_name,\n axis=axis,\n )\n return [concat_node]", "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def build_model_with_all_configurable_types(inputs):\n x = inputs\n x = Conv2D(filters=10, kernel_size=3, name='conv')(x)\n x = BatchNormalization(name='bn')(x)\n x = Activation(tf.nn.relu, name='activation')(x)\n x = SeparableConv2D(filters=10, kernel_size=3, name='sep_conv')(x)\n x = tf.reduce_sum(x, axis=[1, 2])\n x = Dense(units=10, name='dense')(x)\n return x", "def multiple_input_model():\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"multiple_input_model\")(x)\n\n return outputs", "def convert_rnn_param_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n axis = int(attrs.get(\"dim\"))\n\n # mxnet RNN node and ONNX RNN/LSTM/GRU nodes\n # use different ways to store their parameters\n\n # The conversion between these formats is broken into 2 steps\n # The first step (performed here in _rnn_param_concat) regroups the\n # flattened parameters according to the table below.\n # The second step corrects the shapes and orders of gates and is\n # performed and described in more detail in the RNN node\n\n # mxnet [ONNX] -> ONNX (group)\n # i2h_weights [W (+ WB)] -> W (input weights)\n # h2h_weights [R (+ RB)] -> R (recurrence weights)\n # i2h_biases [Wb (+ WBb)] -> B = [Wb + Rb (+ WBb + RBb)]\n # h2h_biases [Rb (+ RBb)] -> (biases)\n\n split = len(input_nodes) // 2\n weights, biases = input_nodes[:split], input_nodes[split:]\n i2h_weights = weights[::2]\n h2h_weights = weights[1::2]\n i2h_biases = biases[::2]\n h2h_biases = biases[1::2]\n reordered_biases = [\n bias\n for pair in zip(i2h_biases, h2h_biases)\n for bias in pair\n ]\n\n # The order of mxnet parameters in the inputs is:\n # [\n # '{}{}_{}_{}'.format(d, l, g, t)\n # for t in ['weight', 'bias']\n # for l in range(num_layers)\n # for d in ['l', 'r'][:num_directions]\n # for g in ['i2h', 'h2h']\n # ]\n\n w = 
onnx.helper.make_node(\n \"Concat\",\n inputs=i2h_weights,\n outputs=[name + \"__W\"],\n axis=axis,\n name=name + \"__W\"\n )\n r = onnx.helper.make_node(\n \"Concat\",\n inputs=h2h_weights,\n outputs=[name + \"__R\"],\n axis=axis,\n name=name + \"__R\"\n )\n b = onnx.helper.make_node(\n \"Concat\",\n inputs=reordered_biases,\n outputs=[name + \"__B\"],\n axis=axis,\n name=name + \"__B\"\n )\n return [w, r, b]", "def build_model():", "def create_split_concat_net_const(self, input_shape, output_shapes, axis, ir_version):\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n import numpy as np\n\n concat_axis = 0\n concat_output_shape = input_shape.copy()\n concat_output_shape[concat_axis] *= 2\n\n const_number = np.prod(input_shape)\n constant = np.random.randint(-127, 127, const_number).astype(np.float)\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_dyn_concat', TensorProto.FLOAT, concat_output_shape)\n\n node_const_def = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=input_shape,\n vals=constant,\n ),\n )\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['const1'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n node_dyn_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=['input', 'output_concat'],\n outputs=['output_dyn_concat'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_split_def, node_concat_def, node_dyn_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net", "def concatenate_model( self, other ):\n\n\t\t# Unify the graphs (requiring disjoint states)\n\t\tself.graph = networkx.union( self.graph, other.graph )\n\t\t\n\t\t# Connect the two graphs\n\t\tself.add_transition( self.end, other.start, 1.00 )\n\n\t\t# Move the end to other.end\n\t\tself.end = other.end", "def _create_aggregate_input(self, v1, v2):\n # sum over time steps; resulting shape is (batch, num_units)\n v1 = mask_3d(v1, self.sentence1_size, 0, 1)\n v2 = mask_3d(v2, self.sentence2_size, 0, 1)\n v1_sum = tf.reduce_sum(v1, [1])\n v2_sum = tf.reduce_sum(v2, [1])\n return tf.concat(axis=1, values=[v1_sum, v2_sum])", "def _construct_combined_model(self):\n # We are optimizing the addition of both losses\n self._discriminator.compile(loss=self._loss_function, optimizer=self._disc_optimizer)\n\n # We define the input and the output for the generator\n noise = layers.Input(shape=(self._noise_dim,))\n label = layers.Input(shape=(1,))\n generated_img = self._generator([noise, label])\n\n # For the combo model we will only train the generator\n self._discriminator.trainable = 
False\n\n # Discriminator takes an img and outputs real/fake & pred_class\n is_real, pred_class = self._discriminator(generated_img)\n\n # The combined model of both the generator and the discriminator\n # Input: noise, and label to the generator\n # Generator uses this to create an image\n # Then the discriminator takes an image \n # The discriminator then ouputs if real and the predicted class\n combo = Model([noise, label], [is_real, pred_class])\n\n combo.compile(loss=self._loss_function, optimizer=self._gen_optimizer)\n\n return combo", "def build(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(self.num_hidden1, init='lecun_uniform', input_shape=(self.num_inputs,)))\n\t\tmodel.add(Activation('relu'))\n\n\t\tmodel.add(Dense(self.num_hidden2, init='lecun_uniform'))\n\t\tmodel.add(Activation('relu'))\n\n\t\tmodel.add(Dense(self.num_output, init='lecun_uniform'))\n\t\tmodel.add(Activation('linear'))\n\n\t\trms = RMSprop(lr=self.lr)\n\t\tmodel.compile(loss='mse', optimizer=rms)\n\t\tself.model = model", "def test_concat_get_op_product_graph(self):\n\n tf.compat.v1.reset_default_graph()\n\n _ = concat_model()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['concat_model/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(2, conn_graph.branch_count)\n self.assertEqual(13, len(conn_graph.get_all_ops()))\n self.assertEqual(12 + len(tf.compat.v1.get_default_graph().get_collection('variables')),\n len(conn_graph.get_all_products()))\n\n # Check that the order of input products to the concat op matches the order of input tensors in the tf graph\n concat_tf_op = tf.compat.v1.get_default_graph().get_operation_by_name(\"concatenate/concat\")\n concat_op = conn_graph.get_all_ops()['concatenate/concat']\n for index, product in enumerate(concat_op.get_input_products()):\n self.assertTrue(len(product.consumers) == 1)\n self.assertEqual(product.tensor_dict[product.consumers[0]], concat_tf_op.inputs[index])", "def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, 
self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def create_split_concat_net(self, input_shape, output_shapes, axis, ir_version):\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_concat', TensorProto.FLOAT, input_shape)\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_split_def, node_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net", "def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output", "def concatenate_tasks(\n tasks,\n concat_train=True,\n concat_valid=True,\n concat_test=True,\n):\n new_task = deepcopy(tasks[0])\n new_task._name = \"+\".join(task.name for task in tasks)\n if concat_train:\n new_task._train_data = ConcatDataset(\n [task.train_data for task in tasks])\n if concat_valid:\n new_task._valid_data = ConcatDataset(\n [task.valid_data for task in tasks])\n if concat_test:\n new_task._test_data = ConcatDataset([task.test_data for task in tasks])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the IP ADDRESS of the camera to which you are connected and the ACQUISITION MODE into which you want to put the camera, this command will send the corresponding request to the camera.
def command(mode, ip, log): logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging_config[log]) # Using the default dict to get a valid format string no matter what phantom_socket = PhantomSocket(ip) phantom_socket.connect() click.echo('CONNECTED TO THE PHANTOM CAMERA') mode_identifier = _modes[mode] phantom_socket.set_mode(mode_identifier) click.echo('PHANTOM WILL TRANSIT INTO THE MODE "%s" NOW!' % mode_identifier) click.echo('THIS WILL CAUSE A REBOOT OF THE CAMERA, SO PLEASE HAVE PATIENCE') click.echo('IN CASE A CONNECTION CANNOT BE ESTABLISHED EVEN AFTER SOME TIME, HARD RESET THE CAMERA') click.echo('AFTER THE HARD RESET, THE MODE SHOULD BE CHANGED') phantom_socket.disconnect()
[ "def camera_control(camera_host, camera_port, camera_user, camera_pass, q):\n\n try:\n camera = IPCamera(camera_host, camera_port, camera_user, camera_pass)\n q.put(camera.get_rtsp_url())\n except RuntimeError as exc:\n q.put(exc)\n\n try:\n while True:\n camera.move_to(*q.get())\n except KeyboardInterrupt:\n pass", "def camera_button(conn):\r\n camera = conn.space_center.camera\r\n if camera.mode == conn.space_center.CameraMode.map:\r\n camera.mode = conn.space_center.CameraMode.automatic\r\n else:\r\n camera.mode = conn.space_center.CameraMode.map", "def event_btn_confirm_ip(self):\n\n print(\"attempting to open camera\")\n self.change_state(States.ACTIVATE_CAMERA)", "def camstart():\n\n\trespond = send_command('camstart')", "def camera_start(self):\n mycam = ONVIFCamera(self.__cam_ip, 80, self.__cam_user, self.__cam_password)\n logging.info('Create media service object')\n media = mycam.create_media_service()\n logging.info('Get target profile')\n media_profile = media.GetProfiles()[0]\n logging.info('Camera working!')\n\n self.mycam = mycam\n self.camera_media_profile = media_profile\n self.camera_media = media\n self.mycam = mycam\n\n return self.mycam", "def camera_assignment():\n roslaunch('camera_assignment.launch')", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def setMode(self, request, context):\n \n self.vehicle.mode = VehicleMode(str(request.mode))\n self.vehicle.wait_ready('mode')\n \n return droneconnect_pb2.Null()", "def send_request(self) -> None:\n # The following parameters must be set for a request primitive\n # (* sent in A-ASSOCIATE-RQ PDU)\n # Application Context Name*\n # Calling AE Title*\n # Called AE Title*\n # UserInformation*\n # Maximum PDV Length*\n # Implementation Class UID*\n # Calling Presentation Address\n # Called Presentation Address\n # Presentation Context Definition List*\n primitive = A_ASSOCIATE()\n # DICOM Application Context Name, see PS3.7 Annex A.2.1\n primitive.application_context_name = UID(APPLICATION_CONTEXT_NAME)\n # Calling AE Title is the source DICOM AE title\n primitive.calling_ae_title = self.requestor.ae_title\n # Called AE Title is the destination DICOM AE title\n primitive.called_ae_title = self.acceptor.ae_title\n # The TCP/IP address of the source, pynetdicom includes port too\n primitive.calling_presentation_address = (\n cast(str, self.requestor.address),\n cast(int, self.requestor.port),\n )\n # The TCP/IP address of the destination, pynetdicom includes port too\n primitive.called_presentation_address = (\n cast(str, self.acceptor.address),\n cast(int, self.acceptor.port),\n )\n # Proposed presentation contexts\n primitive.presentation_context_definition_list = (\n self.requestor.requested_contexts\n )\n\n # User Information - PS3.7 Annex D.3.3\n # Mandatory items:\n # Maximum Length Notification (1)\n # Implementation Class UID Notification (1)\n # Optional notification items:\n # Implementation Version Name Notification (0 or 1)\n # Optional negotiation items:\n # SCP/SCU Role Selection Negotiation (0 or N)\n # Asynchronous Operations Window Negotiation (0 or 1)\n # SOP Class Extended Negotiation (0 or N)\n # SOP Class Common Extended Negotiation (0 or N)\n # User Identity 
Negotiation (0 or 1)\n primitive.user_information = self.requestor.user_information\n\n # Save the request primitive\n self.requestor.primitive = primitive\n\n # Send the A-ASSOCIATE request primitive to the peer\n self.dul.send_pdu(primitive)", "def set_ip_addressing_mode(self, mode):\n self.set_parameter(ATStringCommand.MA.command, utils.int_to_bytes(mode.code, num_bytes=1))", "def initialCamera(self, cmd):\n\n pass", "def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))", "def cozmo_app(coz_conn):\n coz = coz_conn.wait_for_robot()\n coz.camera.image_stream_enabled = True\n coz_ros = CozmoRos(coz)\n coz_ros.run()", "async def request_new_video(blink, network, camera_id):\n url = f\"{blink.urls.base_url}/network/{network}/camera/{camera_id}/clip\"\n return await http_post(blink, url)", "def send_video_to_virtual_camera(self):\n with pyvirtualcam.Camera(width=self.width, height=self.height, fps=self.fps) as cam:\n while self.cap.isOpened():\n _, frame = self.cap.read()\n if frame is None:\n break\n cam.send(frame)\n cam.sleep_until_next_frame()\n self.cap.release()", "def dst_nat_into_vrf():\n\t\n device_params = {\n 'device_type': 'mikrotik_routeros',\n 'port': '11209',\n 'username': 'admin'}\n \t\t\n device_params['ip'] = input('IP Address of managed device: ')\n nd_port = input('SSH port. Blank, if default(11209): ')\n if nd_port:\n device_params['port'] = nd_port\n nd_user = input('Username. Blank, if default (admin): ')\n if nd_user:\n device_params['username'] = nd_user\n device_params['password'] = getpass.getpass()\n outside_address = input('Put outside address for dstnat(default - 93.189.145.82): ')\n if not outside_address:\n outside_address = '93.189.145.82'\n #outside_int = input('Put outside interface (default - ether2(DC Kraud outside int)): ')\n #if not outside_port:\n # outside_port = 'ether2'\n outside_port_dstnat = input('Put outside port for dstnat(Public port): ')\n inside_port = input('Put destination port(only port):') \n inside_address = input('Put inside address for dstnat (Inside adress): ')\n commands = []\n commands.append(f'/ip firewall mangle add action=mark-connection chain=prerouting connection-state=new dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 new-connection-mark=into-vrf passthrough=yes protocol=tcp comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\"')\n commands.append(f'/ip firewall nat add action=dst-nat chain=dstnat comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\" dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 protocol=tcp to-addresses={inside_address} to-ports={inside_port}')\n \n with ConnectHandler(**device_params) as ssh:\n for comm in commands:\n ssh.send_command(comm)\n return print(f'\"{commands[0]}\" and \"{commands[1]}\" are sent to device')", "def start_cam(cam = 'pi1', host = ' ', port = ' '):\n try:\n # using systemd to manage daemons. 
{space} is for weird systemd escaping\n space = '\\\\\\\\x20'\n remote_command = f\"ssh -f {cam} systemctl --user restart picamera@'{host}.local{space}{port}'\" \n print(remote_command)\n os.system(remote_command)\n except Exception as exc:\n sys.exit(f'SSH connection to {cam} failed with: {exc}')", "def establishConnection(self) -> None:\n self.conn = ServalCamera()\n self.conn.connect(self.url)\n self.conn.set_chip_config_files(\n bpc_file_path=self.bpc_file_path,\n dacs_file_path=self.dacs_file_path)\n self.conn.set_detector_config(**self.detector_config)\n # Check pixel depth. If 24 bit mode is used, the pgm format does not work\n # (has support up to 16 bits) so use tiff in that case. In other cases (1, 6, 12 bits)\n # use pgm since it is more efficient\n self.pixel_depth = self.conn.detector_config['PixelDepth']\n if self.pixel_depth == 24:\n file_format = 'tiff'\n else:\n file_format = 'pgm'\n self.conn.destination = {\n \"Image\":\n [{\n # Where to place the preview files (HTTP end-point: GET localhost:8080/measurement/image)\n \"Base\": \"http://localhost\",\n # What (image) format to provide the files in.\n \"Format\": file_format,\n # What data to build a frame from\n \"Mode\": \"count\"\n }]\n }", "def SetRateCameraAttitude(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to insert the supplied genome. If the genome is inserted, this method will return True, otherwise it will return False.
def try_insert_genome(self, genome):
    raise Exception("called abstract insert_genome method")
[ "def can_insert(data):\n return False", "def _can_insert(self, index, value):\n return not bool(self._insert_callback(index, value))", "def insert_whole_genome_if_not_exist(self, head, head_id, sequence):\n id_wholeDNA = self.get_id_whole_genome_by_head_and_head_id(head, head_id)\n if id_wholeDNA == -1:\n sql_string = \"INSERT INTO WHOLE_DNA (head_WD, head_id_WD, sequence_WD) VALUES (%s, %s, %s)\"\n dalObj = DAL(self.db_name, sql_string)\n params = [head, head_id, sequence]\n dalObj.sqlcommand = sql_string\n dalObj.parameters = params\n results = dalObj.executeInsert()\n return results.lastrowid\n else:\n print(\"It already exists an Whole DNA with these heads data\")\n return id_wholeDNA", "def add_genome_to_pop(self, genome, lineage):\n\n individual = Individual(genome, self._next_id, lineage)\n self._next_id += 1\n self.population.append(individual)\n return True", "def perform_insert(self, addr, data):\n\t\treturn 0", "def test_verify_insert(self):\n self._verify([self.applied_commands['insert']])", "def is_insert(self) -> bool:\n return self.statement.is_dml and self.statement.is_insert", "def insert(self, vdi: VDI) -> bool:\n try:\n self.session.xenapi.VBD.insert(self.vbd, vdi.vdi)\n return True\n except Exception as e:\n print(\"VBD.insert Exception\", e)\n return False", "def can_insert(data):\n return hasattr(data, 'read')", "def insert(self, val: int) -> bool:\n if val in self.sett:\n return False\n self.sett.add(val)\n return True", "def add_genome(self, genome):\n self.genomes.append(genome)", "def insert(self, val: int) -> bool:\n if val not in self.set:\n self.set.add(val)\n return True\n return False", "def perform_insert(self, addr: int, data: bytes) -> int:\n\t\treturn 0", "def test_insert(self):\n self._run_tests(\"insert\")", "def insert(self, val: int) -> bool:\n if val in self.setlist :\n return False\n self.setlist.add(val)\n return True", "def insert_whole_genome_no_verification(self, head, head_id, sequence):\n sql_string = \"INSERT INTO WHOLE_DNA (head_WD, head_id_WD, sequence_WD) VALUES (%s, %s, %s)\"\n params = [head, head_id, sequence]\n dalObj = DAL(self.db_name, sql_string)\n dalObj.sqlcommand = sql_string\n dalObj.parameters = params\n results = dalObj.executeInsert()\n return results.lastrowid", "def has_insert(self, shape):\n for insert in self.inserts:\n if insert.shape == shape:\n return True\n return False", "def insert(self, val):\n res = val in self.map\n idx = len(self.vec)\n if res:\n self.map[val].append(idx)\n self.vec.append(val)\n else:\n self.map[val] = [idx]\n self.vec.append(val)\n return not res", "def inject_genome(self, genome: Genome):\n self.population[genome.key] = genome" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate an evaluator class.
def build_evaluator(cfg: CfgNode) -> EvaluatorBase:
    name = cfg["name"]
    evaluator = simple_build(name, cfg, EVALUATORS)
    return evaluator
[ "def create_evaluator_class(T, base_name):\n class Evaluator(e2eGAN):\n def get_name(self):\n return 'Testers/Eval_{}'.format(base_name)\n\n def gen_encoder_model(self):\n return T.gen_encoder_model(self)\n\n def gen_decoder_model(self):\n return T.gen_decoder_model(self)\n\n return Evaluator", "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise 
NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def get_evaluator(self, evaluator_name):\n evaluator_cls = self._registry.get(evaluator_name)\n if evaluator_cls is None:\n raise MlflowException(\n f\"Could not find a registered model evaluator for: {evaluator_name}. \"\n f\"Currently registered evaluator names are: {list(self._registry.keys())}\"\n )\n return evaluator_cls()", "def evaluator_generator(params):\n if params.type == 'box':\n evaluator = coco_evaluator.COCOEvaluator(\n annotation_file=params.val_json_file,\n include_mask=False,\n per_category_metrics=params.per_category_metrics)\n elif params.type == 'box_and_mask':\n evaluator = coco_evaluator.COCOEvaluator(\n annotation_file=params.val_json_file,\n include_mask=True,\n per_category_metrics=params.per_category_metrics)\n elif params.type == 'shapemask_box_and_mask':\n evaluator = coco_evaluator.ShapeMaskCOCOEvaluator(\n mask_eval_class=params.mask_eval_class,\n annotation_file=params.val_json_file,\n include_mask=True,\n per_category_metrics=params.per_category_metrics)\n elif params.type == 'lvis_box':\n evaluator = coco_evaluator.LVISEvaluator(\n annotation_file=params.val_json_file, include_mask=False)\n elif params.type == 'lvis_box_and_mask':\n evaluator = coco_evaluator.LVISEvaluator(\n annotation_file=params.val_json_file, include_mask=True)\n else:\n raise ValueError('The detection evaluation type `{}` is not supported.'\n .format(params.type))\n\n return evaluator", "def __new__(cls,\n input_fn,\n steps=100,\n name=None,\n hooks=None,\n exporters=None,\n delay_secs=120,\n throttle_secs=600):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate steps.\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n\n # Validate name.\n if name is not None and not isinstance(name, six.string_types):\n raise TypeError('`name` must be string, given: {}'.format(name))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n # Validate exporters.\n exporters = _validate_exporters(exporters)\n\n # Validate delay_secs.\n if delay_secs < 0:\n raise ValueError(\n 'Must specify delay_secs >= 0, given: {}'.format(delay_secs))\n\n # Validate throttle_secs.\n if throttle_secs < 0:\n raise ValueError(\n 'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))\n\n return super(EvalSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n steps=steps,\n name=name,\n hooks=hooks,\n exporters=exporters,\n delay_secs=delay_secs,\n throttle_secs=throttle_secs)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def init_process(self):\n global evaluator\n evaluator = Evaluator(self.params)", "def default_evaluator(\n environment_factory: types.EnvironmentFactory,\n network_factory: 
experiments.NetworkFactory,\n policy_factory: experiments.DeprecatedPolicyFactory\n) -> experiments.EvaluatorFactory:\n def evaluator(\n random_key: networks_lib.PRNGKey,\n variable_source: core.VariableSource,\n counter: counting.Counter,\n make_actor: experiments.MakeActorFn,\n ):\n \"\"\"The evaluation process.\"\"\"\n\n # Create environment and evaluator networks\n environment = environment_factory(1)\n environment_spec = specs.make_environment_spec(environment)\n policy = policy_factory(network_factory(environment_spec))\n\n actor = make_actor(random_key, policy, environment_spec, variable_source)\n actor._per_episode_update = True # pylint: disable=protected-access\n\n # Create logger and counter.\n counter = counting.Counter(counter, 'evaluator')\n logger = loggers.make_default_logger('evaluator')\n\n # Create the run loop and return it.\n return environment_loop.EnvironmentLoop(environment, actor, counter, logger)\n return evaluator", "def create_eval(self):\n self.ev_id = \"ev-\" + base64.b32encode(os.urandom(10)).decode(\"ascii\")\n self.ev_name = \"Evaluation: \" + self.ml_name\n self._ml.create_evaluation(\n EvaluationId=self.ev_id,\n EvaluationName=self.ev_name,\n MLModelId=self.ml_id,\n EvaluationDataSourceId=self.fold.eval_ds_id\n )\n logger.info(\"Created Evaluation \" + self.ev_id)", "def evaluator(self):\n return self.__evaluator", "def __init__(self, samples, evaluator=None, evalBuildParms=None, evalCallParms=None):\n self.samples = samples\n self.evalCallParms = evalCallParms\n # If all samples are nan, then assume function is not interpolated. Call evaluator directly.\n if np.all(np.isnan(samples.data)): # Function is defined directly by evaluator\n self.evaluator = evaluator\n else:\n if samples.ndim == 1: # Univariate\n x = samples[samples.dims[0]].data\n y = samples.data\n if evaluator is None:\n evaluator = interp.interp1d\n if evalBuildParms is None:\n evalBuildParms = {'kind': 'linear', 'axis': -1, 'copy': True, 'bounds_error': False,\n 'fill_value': 0.0}\n self.evaluator = evaluator(x, y, **evalBuildParms)\n elif samples.ndim == 2: # Bivariate\n x = samples[samples.dims[0]].data\n y = samples[samples.dims[1]].data\n z = samples.data\n if evaluator is None:\n evaluator = interp.interp2d\n if evalBuildParms is None:\n evalBuildParms = {'kind': 'linear', 'copy': True, 'bounds_error': False,\n 'fill_value': 0.0}\n self.evaluator = evaluator(x, y, z, **evalBuildParms)\n elif samples.ndim > 2: # Multivariate\n domainsamples = []\n for axis in samples.dims:\n domainsamples.append(samples[axis].data) # Build a list of domain axis samples\n if evaluator is None:\n evaluator = interp.RegularGridInterpolator\n if evalBuildParms is None:\n evalBuildParms = {'method': 'linear', 'bounds_error': False,\n 'fill_value': 0.0}\n self.evaluator = evaluator(domainsamples, samples.data, **evalBuildParms)\n else:\n raise ValueError('Input samples to BasisFunction constructor must have at lease one axis domain '\n 'coordinate.')\n self.evalCallParms = evalCallParms\n self.dims = self.samples.dims\n self.dimset = set(self.dims) # BasisFunctions assembled into a FunctionalBasis must have the same set of dims", "def getEvaluator(self) -> Evaluator:\n return self.getOrDefault(self.evaluator)", "def __init__(self, otherPrecisions=None):\n\n QueryEvaluator.__init__(self)\n self.otherPrecisions = otherPrecisions", "def __init__(self, grid_points, metrics_eval_func=None):\n self.grid_points = grid_points\n self.metrics_eval_func = metrics_eval_func or 
self._create_default_metrics_eval_func(grid_points)", "def _add_evaluators(self):\n self._evaluators[\"goal_reached\"] = EvaluatorGoalReached()\n self._evaluators[\"collision\"] = EvaluatorCollisionEgoAgent()\n self._evaluators[\"step_count\"] = EvaluatorStepCount()\n self._evaluators[\"drivable_area\"] = EvaluatorDrivableArea()", "def load_evaluation_data(di_dict, predictor_class, predictor_kwargs):\n ed = EvaluationData()\n predictor_kwargs.update({'di_dict': di_dict})\n predictor = predictor_class(**predictor_kwargs)\n ed.load(di_dict, predictor)\n return ed", "def get_evaluator(name: str) -> EvaluatorType:\n if name not in _EVALUATORS:\n raise ValueError(\"Name %s is not among registered %s.\" %\n (name, _EVALUATORS))\n return _EVALUATORS[name]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate an evaluate helper class.
def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:
    evaluator = build_evaluator(cfg.evaluator)
    helper = EvaluateHelper(evaluator)
    return helper
[ "def create_evaluator_class(T, base_name):\n class Evaluator(e2eGAN):\n def get_name(self):\n return 'Testers/Eval_{}'.format(base_name)\n\n def gen_encoder_model(self):\n return T.gen_encoder_model(self)\n\n def gen_decoder_model(self):\n return T.gen_decoder_model(self)\n\n return Evaluator", "def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator", "def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator", "def __new__(cls,\n input_fn,\n steps=100,\n name=None,\n hooks=None,\n exporters=None,\n delay_secs=120,\n throttle_secs=600):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate steps.\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n\n # Validate name.\n if name is not None and not isinstance(name, six.string_types):\n raise TypeError('`name` must be string, given: {}'.format(name))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n # Validate exporters.\n exporters = _validate_exporters(exporters)\n\n # Validate delay_secs.\n if delay_secs < 0:\n raise ValueError(\n 'Must specify delay_secs >= 0, given: {}'.format(delay_secs))\n\n # Validate throttle_secs.\n if throttle_secs < 0:\n raise ValueError(\n 'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))\n\n return super(EvalSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n steps=steps,\n name=name,\n hooks=hooks,\n exporters=exporters,\n delay_secs=delay_secs,\n throttle_secs=throttle_secs)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, 
output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def evaluator(evaluate):\n @functools.wraps(evaluate)\n def inspyred_evaluator(candidates, args):\n fitness = []\n for candidate in candidates:\n fitness.append(evaluate(candidate, args))\n return fitness\n inspyred_evaluator.single_evaluation = evaluate\n return inspyred_evaluator", "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def evaluator(self):\n return self.__evaluator", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception(\"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn)", "def init_process(self):\n global evaluator\n evaluator = Evaluator(self.params)", "def __init__(self, grid_points, metrics_eval_func=None):\n self.grid_points = grid_points\n self.metrics_eval_func = metrics_eval_func or 
self._create_default_metrics_eval_func(grid_points)", "def _prepare_evaluate(self):\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n labels += ['num_draws_prob', 'seed_prob']\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version, num_draws_prob, seed_prob = \\\n dist_class_attributes(self.respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n if self.mpi_setup == MISSING_INT:\n slavecomm = self.mpi_setup\n else:\n slavecomm = self.mpi_setup.py2f()\n self.set_up_baseline(periods_draws_emax, None)\n\n initial_conditions = get_initial_conditions(self.respy_base)\n\n args = (smm_sample_f2py, state_space_info, initial_conditions, disturbances, slavecomm)\n self.simulate_sample = partial(*args)", "def eval(self):\n return self.with_transforms(\"eval\")", "def evaluate(self, env):\n raise NotImplementedError('abstract') # pragma: no cover", "def __init__(self, samples, evaluator=None, evalBuildParms=None, evalCallParms=None):\n self.samples = samples\n self.evalCallParms = evalCallParms\n # If all samples are nan, then assume function is not interpolated. Call evaluator directly.\n if np.all(np.isnan(samples.data)): # Function is defined directly by evaluator\n self.evaluator = evaluator\n else:\n if samples.ndim == 1: # Univariate\n x = samples[samples.dims[0]].data\n y = samples.data\n if evaluator is None:\n evaluator = interp.interp1d\n if evalBuildParms is None:\n evalBuildParms = {'kind': 'linear', 'axis': -1, 'copy': True, 'bounds_error': False,\n 'fill_value': 0.0}\n self.evaluator = evaluator(x, y, **evalBuildParms)\n elif samples.ndim == 2: # Bivariate\n x = samples[samples.dims[0]].data\n y = samples[samples.dims[1]].data\n z = samples.data\n if evaluator is None:\n evaluator = interp.interp2d\n if evalBuildParms is None:\n evalBuildParms = {'kind': 'linear', 'copy': True, 'bounds_error': False,\n 'fill_value': 0.0}\n self.evaluator = evaluator(x, y, z, **evalBuildParms)\n elif samples.ndim > 2: # Multivariate\n domainsamples = []\n for axis in samples.dims:\n domainsamples.append(samples[axis].data) # Build a list of domain axis samples\n if evaluator is None:\n evaluator = interp.RegularGridInterpolator\n if evalBuildParms is None:\n evalBuildParms = {'method': 'linear', 'bounds_error': False,\n 'fill_value': 0.0}\n self.evaluator = evaluator(domainsamples, samples.data, **evalBuildParms)\n else:\n raise ValueError('Input samples to BasisFunction constructor must have at lease one axis domain '\n 'coordinate.')\n self.evalCallParms = evalCallParms\n self.dims = self.samples.dims\n self.dimset = set(self.dims) # BasisFunctions assembled into a FunctionalBasis must have the same set of dims", "def getFactoryEvaluateExpressionOnly(self):\n # factory function for evaluateExpressionOnly\n def evaluateExpressionOnly_factory(expression):\n return self.evaluateExpressionOnly(expression)\n\n return evaluateExpressionOnly_factory" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot x and y axes of dfs in a common graph.
def plot(x, y, *dfs):
    ax = None
    for df in dfs:
        ax = df[[x, y]].set_index(x).plot(kind='line', ylim=(0, None), xlim=(0, None), ax=ax)
[ "def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max):\n xticks = dfs_all_values(dfs, x)\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # setting the x-column as an index is required to draw the y-column\n # as a function of x argument\n df = df.set_index(x)\n # plot line on the subplot\n df[y].plot.line(ax=ax, rot=45, marker='.')\n\n if xscale == \"linear\":\n ax.set_xscale(xscale)\n else:\n ax.set_xscale(xscale, base=2)\n ax.xaxis.set_major_formatter(ScalarFormatter())\n\n ax.set_xticks(xticks)\n ax.set_xlabel(get_label(x))\n ax.set_ylabel(get_label(y))\n ax.set_ylim(bottom=0)\n if yaxis_max is not None:\n ax.set_ylim(top=float(yaxis_max))\n ax.legend(legend, fontsize=6)\n ax.grid(True)", "def plot2D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n fig, ax = plt.subplots(figsize=figsize)\n\n for df, color in zip(dfs, cycle(COLORS)):\n X, Y = (df[col] for col in columns)\n plt.scatter(X, Y, c=color, marker=MARKER)\n\n for axis, col in zip(['x', 'y'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.index):\n corr = 2\n ax.annotate(text, xy=(i + corr, j + corr))\n\n plt.show()", "def plot_data(self, df):\n fig, ax = plt.subplots(dpi=150)\n cols = df.columns.astype(str)\n ax.plot(df[cols[0]], df[cols[1]])\n ax.set_xlabel(cols[0])\n ax.set_ylabel(cols[1])\n\n return fig, ax", "def plot_graph(self) -> None:", "def show_plot_for_system(x_axis: list, y_axis: list, labels: list) -> None:\n plt.title(labels[2])\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n plt.grid()\n plt.plot(x_axis, y_axis, 'k')\n plt.show()", "def plot_graph(self):\r\n x = []\r\n y = []\r\n\r\n for n in self.graph.get_all_v().values():\r\n if(n.get_pos() != None):\r\n x.append(n.get_pos().get_x())\r\n y.append(n.get_pos().get_y())\r\n else:\r\n x_random = random.random()\r\n y_random = random.random()\r\n n.set_pos(x_random, y_random, 0)\r\n x.append(x_random)\r\n y.append(y_random)\r\n fig, ax = plt.subplots()\r\n ax.scatter(x, y, 60, \"red\")\r\n for xi in self.graph.get_all_v().values():\r\n for yi in self.graph.all_out_edges_of_node(xi.get_key()):\r\n src = (xi.get_pos().get_x(), xi.get_pos().get_y())\r\n dest = (self.graph.get_node(yi).get_pos().get_x(), self.graph.get_node(yi).get_pos().get_y())\r\n plt.annotate(\"\", dest, src, arrowprops=dict(edgecolor=\"black\", arrowstyle=\"->\"))\r\n\r\n plt.title(\"OOP - Ex3\")\r\n plt.xlabel(\"x axis\")\r\n plt.ylabel(\"y axis\")\r\n plt.show()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, 
length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()", "def plot_xdop_distribution(dRtk: dict, dfXDOP: pd.DataFrame, dfXDOPdisp: pd.DataFrame, logger: logging.Logger, showplot: bool = False):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: creating XDOP distribution plot'.format(func=cFuncName))\n\n # select colors for xDOP coordinate difference\n colors = ('blue', 'green', 'cyan', 'red')\n\n # set up the plot\n plt.style.use('ggplot')\n\n # subplots\n fig = plt.figure(figsize=(14.0, 9.0), tight_layout=False)\n fig.suptitle('{syst:s} - {posf:s} - {date:s}: XDOP'.format(posf=dRtk['info']['rtkPosFile'], syst=dRtk['syst'], date=dRtk['Time']['date']))\n\n # create a grid for lotting the XDOP line plots and 6 XDOP distribution plots\n gs = GridSpec(2, 4)\n\n # plot the XDOPs and #SVs on the first axis\n ax = fig.add_subplot(gs[0, :]) # first row, span all columns\n plot_xdop_svs(dfDops=dfXDOP, colors=colors, axis=ax, logger=logger)\n\n # add the xDOP distributions\n axisShare = None\n for col, xdop, color in zip((0, 1, 2, 3), dfXDOPdisp.columns[-4:], colors):\n # create exis for this figure\n if axisShare is None:\n ax = fig.add_subplot(gs[1, col])\n axisShare = ax\n else:\n ax = fig.add_subplot(gs[1, col], sharey=axisShare)\n # ax.get_yaxis().set_ticklabels([])\n ax.tick_params(labelleft=False)\n\n # plot distribution for a DOP value\n plot_xdop_histogram(dfDopsDist=dfXDOPdisp, xdop=xdop, color=color, axis=ax, logger=logger)\n\n # save the plot in subdir png of GNSSSystem\n amutils.mkdir_p(os.path.join(dRtk['info']['dir'], 'png'))\n pngName = os.path.join(dRtk['info']['dir'], 'png', os.path.splitext(dRtk['info']['rtkPosFile'])[0] + '-XDOP.png')\n fig.savefig(pngName, dpi=fig.dpi)\n\n if showplot:\n plt.show(block=True)\n else:\n plt.close(fig)", "def plot_by_id(self, x_axis=None, y_axis=None):\n if x_axis is None:\n x_axis = self._ws\n\n if y_axis is None:\n y_axis = self._w\n\n turbs = self._df[self._id].unique()\n num_turbs = len(turbs)\n num_rows = np.ceil(num_turbs / 4.0)\n\n plt.figure(figsize=(15, num_rows * 5))\n n = 1\n for t in turbs:\n plt.subplot(num_rows, 4, n)\n scada_sub = self._df.loc[self._df[self._id] == t, :]\n plt.scatter(scada_sub[x_axis], scada_sub[y_axis], s=5)\n n = n + 1\n plt.title(t)\n plt.xlabel(x_axis)\n 
plt.ylabel(y_axis)\n plt.tight_layout()\n plt.show()", "def plot_datasets(datasets):\n\n\t# plt.grid(True)\n\n\tfor ds in datasets:\n\t\t(f, ax) = plt.subplots()\n\n\t\tax.grid(True)\n\n\t\tif 'xl' in ds:\n\t\t\tax.set_xlabel(ds['xl'])\n\t\tif 'yl' in ds:\n\t\t\tax.set_ylabel(ds['yl'])\n\n\t\tif 'xl' in ds and 'yl' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl'])\n\t\t\tf.canvas.set_window_title(title)\n\n\t\tif 'x' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl']) if 'title' not in ds else ds['title']\n\t\t\tf.canvas.set_window_title(title)\n\t\t\tmarker = 'y1m' in ds and ds['y1m'] or None\n\t\t\tax.plot(ds['x'], ds['y'], label=ds['yl'], marker=marker)\n\t\tif 'x2' in ds:\n\t\t\t# label = \"y2\" if 'y2l' not in ds else ds['y2l']\n\t\t\tlabel = 'y2l' in ds and ds['y2l'] or 'y2'\n\t\t\tmarker = 'y2m' in ds and ds['y2m'] or None\n\t\t\tax.plot(ds['x2'], ds['y2'], label=label, marker=marker)\n\t\t\tax.legend()\n\t\tif 'x3' in ds:\n\t\t\t# label = \"y3\" if 'y3l' not in ds else ds['y3l']\n\t\t\tlabel = 'y3l' in ds and ds['y3l'] or 'y3'\n\t\t\tmarker = 'y3m' in ds and ds['y3m'] or None\n\t\t\tax.plot(ds['x3'], ds['y3'], label=label, marker=marker)\n\t\t\tax.legend()\n\n\t\tif 'sub' in ds:\n\t\t\tfor sub in ds['sub']:\n\t\t\t\t# ax.set_ylabel(sub['yl'])\n\t\t\t\t# ax.set_xlabel(sub['xl'])\n\t\t\t\t# title = \"%s from %s\" % (sub['yl'], sub['xl']) if 'title' not in sub else sub['title']\n\t\t\t\t# f.canvas.set_window_title(title)\n\n\t\t\t\tlabel = 'yl' in sub and sub['yl']\n\t\t\t\tmarker = 'ym' in sub and sub['ym'] or None\n\t\t\t\tax.plot(sub['x'], sub['y'], label=label, marker=marker)\n\t\t\t\tax.legend()\n\n\t\tax.spines['left'].set_position('zero')\n\t\tax.spines['bottom'].set_position('zero')\n\t\tax.spines['left'].set_smart_bounds(True)\n\t\tax.spines['bottom'].set_smart_bounds(True)\n\n\tplt.show()", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def plot_all_df_columns(df, col_nums, title='', xlabel=''):\n i = 1\n values = df.values\n for col in col_nums:\n plt.subplot(len(col_nums), 1, i)\n plt.plot(values[:, col])\n plt.title(title)\n plt.ylabel(dr_df.columns[col])\n plt.xlabel(xlabel)\n i += 1\n plt.tight_layout()\n plt.show()", "def plot_multiplots(data):\n plt.subplot(2, 1, 1)\n plt.plot(data[\"timestamp\"], data[\"temperature\"], 'o-')\n plt.title(\"Sensor Boards\")\n plt.ylabel(\"Temperatture\")\n plt.grid()\n\n plt.subplot(2, 1, 2)\n plt.plot(data[\"timestamp\"], data[\"humidity\"], '.-')\n plt.xlabel(\"Zeitstempel\")\n plt.ylabel(\"Luftfeuchtigkeit\")\n plt.grid()\n\n plt.show()", "def plot_all(self):\n\n # Find the max bandwidth for the plots that use secondary y-axes\n # self._get_max_bw()\n\n self.plot_throttle()\n self.plot_temps()\n\n if self.power_df is not None:\n self.plot_power()\n\n plt.clf()", "def plot_xdop_svs(dfDops: pd.DataFrame, colors: tuple, axis, logger: logging.Logger):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: creating XDOP / #SVs vs time plot'.format(func=cFuncName))\n\n axis.set_ylim([0, 24])\n axis.set_ylabel('#SVs [-]', fontsize='large', color='grey')\n # axis.set_xlabel('Time [sec]', fontsize='large')\n\n axis.fill_between(dfDops['DT'], 0, dfDops['#SVs'], alpha=0.5, linestyle='-', linewidth=3, color='grey', label='#SVs', interpolate=False)\n # plot PDOP on second y-axis\n axRight = axis.twinx()\n\n axRight.set_ylim([0, 15])\n axRight.set_ylabel('XDOP [-]', fontsize='large')\n\n # plot XDOPs (last 4 columns)\n 
for dop, color in zip(dfDops.columns[-4:], colors):\n axRight.plot(dfDops['DT'], dfDops[dop], linestyle='-', marker='.', markersize=1, color=color, label=dop)\n\n # add the legend to the plot\n axRight.legend(loc=\"upper right\")\n\n # set title\n axis.set_title('Visible satellites & XDOP', fontsize='x-large')\n\n # create the ticks for the time axis\n dtFormat = plot_utils.determine_datetime_ticks(startDT=dfDops['DT'].iloc[0], endDT=dfDops['DT'].iloc[-1])\n\n if dtFormat['minutes']:\n # axis.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(10, 60, 10), interval=1))\n pass\n else:\n axis.xaxis.set_major_locator(dates.HourLocator(interval=dtFormat['hourInterval'])) # every 4 hours\n axis.xaxis.set_major_formatter(dates.DateFormatter('%H:%M')) # hours and minutes\n\n axis.xaxis.set_minor_locator(dates.DayLocator(interval=1)) # every day\n axis.xaxis.set_minor_formatter(dates.DateFormatter('\\n%d-%m-%Y'))\n\n axis.xaxis.set_tick_params(rotation=0)\n for tick in axis.xaxis.get_major_ticks():\n # tick.tick1line.set_markersize(0)\n # tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('center')", "def plot_disc_walkers(self, id_discs=None):\n # Making sure we have a list\n if not id_discs:\n id_discs = range(len(self.axes))\n elif type(id_discs) == int:\n id_discs = [id_discs]\n \n nplots = len(id_discs)\n fig, axes = plt.subplots(nplots, 3, sharex=True, figsize=(20, nplots*5))\n shape = axes.shape\n if len(shape) > 1:\n for axg in axes:\n for ax in axg:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n else:\n for ax in axes:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) \n \n \n for disc_id in id_discs:\n axis_name = {\"x\": \"yz\", \"y\": \"xz\", \"z\": \"xy\"}[self.axes[disc_id]]\n param_name = ['a', 'b', 'M']\n for i in range(3):\n pid = disc_id*3+i\n samples = sampler.chain[:,:,pid].T\n if nplots > 1:\n axis = axes[disc_id][i]\n else:\n axis = axes[i]\n \n axis.plot(samples, color='k', alpha=10.0 / self.n_walkers)\n #axis.yaxis.set_major_locator(MaxNLocator(5))\n axis.set_ylabel('$'+param_name[i]+'_{{{0}{1}}}$'.format(axis_name, disc_id))\n axis.set_xlabel('Iteration')\n\n #plt.title('Parameter values for discs : ' + ', '.join(str(x) for x in id_discs))\n\n return fig", "def plot_nodes(nodes):\n x = [node.x for node in nodes]\n y = [node.y for node in nodes]\n plt.plot(x, y, 'k.')\n# plot_nodes_id(nodes)\n plot_nodes_energy(nodes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1/8 microstepping, simulated by faking a distance twice as long.
def micro_8(steps, a):
    df = pd.DataFrame(index=np.arange(0, steps * 16), columns=('v', 's', 'd', 't'))
    t = 0.0
    m = 8 # micro level
    d = d0 = math.sqrt(1/a/m) # faster accel since distance is longer
    s = 0 # steps
    p = 0 # position
    p_d = 1/m # position delta
    for s in range(800):
        if s == 0:
            d = d0 * 0.676
        else:
            d -= d * 2 / (4 * s + 1)
        s += 1
        p += p_d
        t += d
        df.loc[s] = [1/d/m, p, d, t]
    # m = 1
    # p_d = 1/m
    # d = d * 8
    # for s in range(100, 200):
    #     if s == 0:
    #         d = d0 * 0.676
    #     else:
    #         d -= d * 2 / (4 * s + 1)
    #     s += 1
    #     p += p_d
    #     t += d
    #     df.loc[s] = [1/d/m, p, d, t]
    return df.dropna()
[ "def drive_eight(n):\n # Variables for the go_diff function\n fast_speed = 80 \n slow_speed = 25\n # Half a lap time, this is the time the robot turns in a direction before switching\n half_lap_time =6.2 \n # To avoid having tu manually stop the robot we set it to drive continuously for x amount of seconds.\n elapsedSecs = 0\n while elapsedSecs < half_lap_time * 2 * n:\n arlo.go_diff(fast_speed, slow_speed, 1, 1)\n sleep(half_lap_time)\n arlo.go_diff(slow_speed, fast_speed, 1, 1)\n sleep(half_lap_time)\n elapsedSecs += half_lap_time * 2", "def test(self):\n import time\n for i in range(8):\n self.shift_out(1 << i)\n time.sleep(0.25)\n self.shift_out(0)\n time.sleep(0.25)", "def get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def get_incremental_distance():\n return current_speed/float(FPS)", "def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = 
mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def launch_distance(speed, duration): \n if speed <= 0 or duration <= 0:\n return 0\n time = pow(speed/25000, -0.95)\n diff = time - duration\n return 90 - int(diff/time*90)", "def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return", "def walk(fb, cs, distance, stepLength, gait, duration_ss = -1 , duration_ds = -1, first_half_step=True):\n fb.usePosturalTaskContactCreation(True)\n prev_phase = cs.contactPhases[-1]\n for limb in gait:\n eeName = fb.dict_limb_joint[limb]\n assert prev_phase.isEffectorInContact(eeName), \"All limbs in gait should be in contact in the first phase\"\n isFirst = first_half_step\n reached = False\n firstContactReachedGoal = False\n remainingDistance = distance\n while remainingDistance >= 1e-6:\n for k, limb in enumerate(gait):\n #print(\"move limb : \",limb)\n eeName = fb.dict_limb_joint[limb]\n if isFirst:\n length = stepLength / 2.\n isFirst = False\n else:\n length = stepLength\n if k == 0 and first_half_step:\n if length > (remainingDistance + stepLength / 2.):\n length = remainingDistance + stepLength / 2.\n firstContactReachedGoal = True\n else:\n if length > remainingDistance:\n length = remainingDistance\n transform = SE3.Identity()\n #print(\"length = \",length)\n transform.translation = np.array([length, 0, 0])\n cs.moveEffectorOf(eeName, transform, duration_ds, duration_ss)\n remainingDistance -= stepLength\n if first_half_step and not firstContactReachedGoal:\n transform = SE3.Identity()\n #print(\"last length = \", stepLength)\n transform.translation = np.array([stepLength / 2., 0, 0])\n cs.moveEffectorOf(fb.dict_limb_joint[gait[0]], transform, duration_ds, duration_ss)\n q_end = fb.referenceConfig[::] + [0] * 6\n q_end[0] += distance\n fb.setCurrentConfig(q_end)\n com = fb.getCenterOfMass()\n setFinalState(cs, com, q=q_end)\n fb.usePosturalTaskContactCreation(False)", "def step(vessels,t,deltat):\n pass", "def takeoff(self, n, e, d):\n pass", "def nearest_test_pulse(self):", "def update_dist_per_loop(self):\n self.distance_per_loop = self.movable*self.base_distance \\\n 
*((floor(self.score/1000)*0.5) + 1)", "def step(lat,lon,bearing,distance,R):\n lat,lon,bearing = np.deg2rad(lat),np.deg2rad(lon),np.deg2rad(bearing)\n delta = distance/R\n lat2 = np.arcsin(np.sin(lat)*np.cos(delta)+np.cos(lat)*np.sin(delta)*np.cos(bearing))\n lon2 = lon+np.arctan2(np.sin(bearing)*np.sin(delta)*np.cos(lat),np.cos(delta)-np.sin(lat)*np.sin(lat2))\n return np.degrees(lat2),np.degrees(lon2)", "def get_movements_8n(x: int, y: int) -> List:\n return [(x + 1, y + 0),\n (x + 0, y + 1),\n (x - 1, y + 0),\n (x + 0, y - 1),\n (x + 1, y + 1),\n (x - 1, y + 1),\n (x - 1, y - 1),\n (x + 1, y - 1)]", "def backtrack_steps():\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps", "def toWindspeed_6_2(buf, start):\n result = (buf[0][start+0] >> 4)* 16**5 \\\n + (buf[0][start+0] & 0xF)* 16**4 \\\n + (buf[0][start+1] >> 4)* 16**3 \\\n + (buf[0][start+1] & 0xF)* 16**2 \\\n + (buf[0][start+2] >> 4)* 16**1 \\\n + (buf[0][start+2] & 0xF)\n result /= 256.0\n result /= 100.0 # km/h\n return result", "def go(distance):\n if distance==0: return\n sign=1 if distance>0 else -1\n distance=copy.copy(abs(distance))\n distance_gone=0\n distance_per_frame=self.FRAME_TIME*self.turtle.SPEED\n steps=int(math.ceil(distance/float(distance_per_frame)))\n angle=from_my_angle(self.turtle.orientation)\n unit_vector=Vector((math.sin(angle),math.cos(angle)))*sign\n step=distance_per_frame*unit_vector\n for i in range(steps-1):\n with smartsleep.Sleeper(self.FRAME_TIME):\n self.turtle.pos+=step\n self.send_report()\n distance_gone+=distance_per_frame\n\n last_distance=distance-distance_gone\n last_sleep=last_distance/float(self.turtle.SPEED)\n with smartsleep.Sleeper(last_sleep):\n last_step=unit_vector*last_distance\n self.turtle.pos+=last_step\n self.send_report()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the elements in ref_gen to an existing index.
def update_index(self, ref_gen):
    testing = True
    logging.warning('Updating index')
    es_insert.index(es, ref_gen, self.index_name, testing, action="update")
    logging.warning('Finished updating')
[ "def addIndex(indexDef):", "def build_index(self, descriptors):", "def generate_index(self):\r\n # Reset the index as we are regenerating it from scratch\r\n self.document_index.drop_collection()\r\n # Add an index entry for each document\r\n for doc in self.get_queryset(self.document):\r\n self.add_to_index(doc)", "def insert_index(self):\n pass", "def index_add(all_index, this_index, samples, caller):\n for key, record in this_index.iteritems():\n if key not in all_index:\n all_index[key] = {}\n for sample_id in samples:\n if sample_id not in all_index[key]:\n all_index[key][sample_id] = {caller: []}\n elif caller not in all_index[key][sample_id]:\n all_index[key][sample_id][caller] = []\n # NB: If caller was run twice, will have 2 records here\n all_index[key][sample_id][caller].append(record)", "def index_doc(self, docid, obj):\n _assertint(docid)\n for index in self.values():\n index.index_doc(docid, obj)\n self.objectids.insert(docid)", "def _register_with_indexes(self, edge):\n for (restr_keys, index) in self._indexes.items():\n vals = tuple(\n self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys\n )\n index.setdefault(vals, []).append(edge)", "def update_index(self):\n self.delete_index()\n self._refresh_materialized_views()\n self.client.indices.create(self.index_name, self.mapping)\n create_aliases(self.client, self.index_name, True)\n self._add_contents()", "def build_index():\n pass", "def _add_index(self):\n self.add_column(\"index\", [str(i) for i in range(len(self))])", "def create_index():", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def add_index(self, list_of_attribute_names):\n self.indexes.append(list_of_attribute_names)\n return", "def test_transaction_index_add_rel_to_index(self):\r\n #test nodes\r\n n1 = self.gdb.nodes.create()\r\n n2 = self.gdb.nodes.create()\r\n r = n1.relationships.create('Knows', n2)\r\n index = self.gdb.relationships.indexes.create('index_rel')\r\n with self.gdb.transaction():\r\n index.add('test1', 'test1', r)\r\n index['test2']['test2'] = r\r\n self.assertTrue(index['test1']['test1'][-1] == r)\r\n self.assertTrue(index['test2']['test2'][-1] == r)", "def reference_index(self, value):\n self.set_reference_index(value)", "def ref_addupdate(ref: Ref, idx: Tuple[int], x: Array) -> None:\n return addupdate_p.bind(ref, x, *idx)", "def store_index(self, index, doc_type, source_list, init_id):\n\n bulk_actions = []\n doc_id = init_id\n\n for source in source_list:\n data_body = ElasticSearchUtility.__index_data_body(index, doc_type, doc_id, source[\"_source\"])\n bulk_actions.append(data_body)\n doc_id += 1\n\n print 'inserting - ', len(bulk_actions)\n helpers.bulk(self.es, bulk_actions)", "def typesense_index_referral(ref, client=None):\n if not client:\n client = typesense_client()\n\n ref_document = {\n 'id': str(ref.pk),\n 'created': ref.created.timestamp(),\n 'type': ref.type.name,\n 'referring_org': ref.referring_org.name,\n 'regions': [i.name for i in ref.regions.all()],\n 'reference': ref.reference if ref.reference else '',\n 'description': ref.description if ref.description else '',\n 'address': ref.address if ref.address else '',\n 'lga': ref.lga.name if ref.lga else '',\n 'dop_triggers': [i.name for i in ref.dop_triggers.all()],\n }\n if ref.point:\n ref_document['point'] = [ref.point.x, ref.point.y]\n client.collections['referrals'].documents.upsert(ref_document)", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if `assignment` is complete (i.e., assigns a value to each crossword variable); return False otherwise.
def assignment_complete(self, assignment):
    # print("Entered assignment_complete Function")
    for var in assignment:
        if assignment[var] is None:
            return False
    return self.consistent(assignment)
    # raise NotImplementedError
[ "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def assignment_complete(self, assignment):\n for var in self.crossword.variables:\n if var in assignment:\n continue\n else:\n return False\n\n return True", "def assignment_complete(self, assignment):\n for v in self.crossword.variables:\n if v not in assignment.keys():\n return False\n if assignment[v] not in self.crossword.words:\n return False\n \n return True", "def assignment_complete(self, assignment):\n # for word in self.crossword.variables:\n # if word not in assignment.keys() and assignment[word] not in self.crossword.words:\n # return False\n # return True \n\n for variable in self.crossword.variables:\n if variable not in assignment.keys():\n return False\n if assignment[variable] not in self.crossword.words:\n return False\n return True", "def assignment_complete(self, assignment):\n for x in assignment:\n if assignment[x]==None:\n return False\n return True", "def is_complete(self, assignment):\n\n complete = True\n for i in assignment:\n if len(assignment[i]) != 1:\n return False\n return complete", "def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True", "def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError", "def consistent(self, assignment):\n #iterate through variables in assignment\n for var in assignment:\n #check all other values in assignment for duplication\n for var_2 in assignment:\n #don't check itself\n if var_2 == var:\n continue\n #if assignment is the same, not consistent, return false\n if assignment[var_2] == assignment[var]:\n return False\n #check that the variable's lenggth matches the chosen word\n if var.length != len(assignment[var]):\n return False\n #Check neighbors for conflict\n for neighbor in self.crossword.neighbors(var):\n #if the neighbor has not been assigned, skip\n if neighbor not in assignment.keys():\n continue\n #Get the overlap of var and neighbor\n overlap = self.crossword.overlaps[var, neighbor]\n #assign indices\n var_index = overlap[0]\n neighbor_index = overlap[1]\n #check for conflict at indices. 
If conflict, return false\n if assignment[var][var_index] != assignment[neighbor][neighbor_index]:\n return False\n\n #If we make it to the end, no issues, return true\n return True", "def consistent(self, assignment):\n # to discard case of any duplicate values\n if len(set(assignment.values()))<len(assignment):\n return False\n for var in assignment:\n for x in self.crossword.neighbors(var):\n o = self.crossword.overlaps.get((var,x),None)\n if o!=None and assignment[var][o[0]]!=assignment[x][o[1]]:\n return False\n\n return True", "def assignment_ok(assignment, total_decodings):\n for codeword, decode in assignment.iteritems():\n neighbor_decodes = set()\n empty_neighbor_ct = 0\n for neighbor in adjacents(codeword):\n if neighbor in assignment:\n if assignment[neighbor] != decode:\n neighbor_decodes.add(assignment[neighbor])\n else:\n empty_neighbor_ct += 1\n if len(neighbor_decodes) + empty_neighbor_ct < total_decodes:\n return False\n return True", "def consistent(self, assignment):\n\n # Checks to make sure each word is unique\n if len(assignment) != len(set(assignment.values())):\n return False\n\n # Checks to make sure every value is the correct length\n for variable in assignment:\n if variable.length != len(assignment[variable]):\n return False\n\n # Checks that overlaps match\n for variable2 in assignment:\n if variable != variable2 and self.crossword.overlaps[variable, variable2] is not None:\n if assignment[variable][self.crossword.overlaps[variable, variable2][0]] != assignment[variable2][self.crossword.overlaps[variable, variable2][1]]:\n return False\n return True", "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0", "def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)", "def backtrack(self, assignment):\n\n # If the assignment is complete and it has a value for every variable in the crossword puzzle and every value is consistent\n if self.assignment_complete(assignment) and len(assignment) == len(self.crossword.variables) and self.consistent(assignment):\n return assignment\n \n # Select a variable that has not been assigned a value\n var = self.select_unassigned_variable(assignment)\n\n # Iterate through the ordered domain values of the variable to test each one\n for value in self.order_domain_values(var, assignment):\n\n # Join var mapped to its value and the assignment to see if it is consistent\n temp = {var:value}\n temp1 = assignment\n\n # If the value has not already been used and it is consistent with the rest of the assignment\n if value not in assignment.values() and self.consistent(merge(temp, temp1)):\n\n # Actually add the variable and its value to the assignment\n assignment[var] = value\n\n # Continue to backtrack until every variable has an assignment\n result = self.backtrack(assignment)\n\n # If the assignment is complete and if it is consistent, we have found the answer\n if self.assignment_complete(result):\n if self.consistent(result):\n return result\n\n # Otherwise, we picked a wrong value somewhere, so delete the latest value of the variable to try another\n del assignment[var]\n\n # If we can never find a result to return, the problem has no solution\n return None", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if 
isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None", "def is_assignable(self):\n if self.assignment is not None:\n return False\n for node in self.connected_nodes:\n if node.assignment is not None:\n return True\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if `assignment` is consistent (i.e., words fit in crossword puzzle without conflicting characters); return False otherwise.
def consistent(self, assignment):
    # print("Entered consistent Function")
    # print("assignment")
    # print(assignment)

    overlaps = self.crossword.overlaps
    value_set = set()
    for variable in assignment:
        #checking overlaps with neighbors
        neighbors = self.crossword.neighbors(variable)
        for neighbor in neighbors:
            overlap = overlaps[(variable, neighbor)]
            if (neighbor in assignment):
                # print("var 1 overlap letter")
                # print(assignment[variable][overlap[0]])
                # print("var 2 overlap letter")
                # print(assignment[neighbor][overlap[1]])
                if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):
                    return False

        # print("neighbors")
        # print(neighbors)

        #checking that the assignment is the correct length for the variable
        if (variable.length != len(assignment[variable])):
            return False

        #the set to check for distinct variables later
        value_set.add(assignment[variable])

    #Checking that all variables are distinct
    #these should be the same length unless two or more variables share an value
    if( len(value_set) is not len(assignment)):
        return False

    return True

    # raise NotImplementedError
[ "def consistent(self, assignment):\n #iterate through variables in assignment\n for var in assignment:\n #check all other values in assignment for duplication\n for var_2 in assignment:\n #don't check itself\n if var_2 == var:\n continue\n #if assignment is the same, not consistent, return false\n if assignment[var_2] == assignment[var]:\n return False\n #check that the variable's lenggth matches the chosen word\n if var.length != len(assignment[var]):\n return False\n #Check neighbors for conflict\n for neighbor in self.crossword.neighbors(var):\n #if the neighbor has not been assigned, skip\n if neighbor not in assignment.keys():\n continue\n #Get the overlap of var and neighbor\n overlap = self.crossword.overlaps[var, neighbor]\n #assign indices\n var_index = overlap[0]\n neighbor_index = overlap[1]\n #check for conflict at indices. If conflict, return false\n if assignment[var][var_index] != assignment[neighbor][neighbor_index]:\n return False\n\n #If we make it to the end, no issues, return true\n return True", "def consistent(self, assignment):\n\n # Checks to make sure each word is unique\n if len(assignment) != len(set(assignment.values())):\n return False\n\n # Checks to make sure every value is the correct length\n for variable in assignment:\n if variable.length != len(assignment[variable]):\n return False\n\n # Checks that overlaps match\n for variable2 in assignment:\n if variable != variable2 and self.crossword.overlaps[variable, variable2] is not None:\n if assignment[variable][self.crossword.overlaps[variable, variable2][0]] != assignment[variable2][self.crossword.overlaps[variable, variable2][1]]:\n return False\n return True", "def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True", "def consistent(self, assignment):\n # to discard case of any duplicate values\n if len(set(assignment.values()))<len(assignment):\n return False\n for var in assignment:\n for x in self.crossword.neighbors(var):\n o = self.crossword.overlaps.get((var,x),None)\n if o!=None and assignment[var][o[0]]!=assignment[x][o[1]]:\n return False\n\n return True", "def consistent(self, assignment):\n # consistent = True\n # # check unique values of assigment and add to dict\n # values_words = {}\n # for word in assignment.values():\n # values_words = word\n # # check one time uniqueness of word in \n # for value in assignment.values():\n 
# if value != values_words:\n # consistent = False\n # # check the lenth of the words\n # for key, value in assignment.items():\n # # print(key.length, [len(n) for n in value])\n # # iterate over every len(word) \n # for num in [len(n) for n in value]:\n # # check the lenght of word in assiment and word in the list\n # if num != key.length:\n # consistent = False\n # # no confilict with neightboring\n # # find all overlaps or not\n # values = self.crossword.overlaps #self\n # # select words that overplas (by not None value)\n # values_constrains = [i for i in values if values[i] is not None]\n # # set of value constrains\n # temp_set_1 = set()\n # for var, ind in values_constrains:\n # temp_set_1.add(ind)\n # temp_set_2 = set()\n # for pair in creator.crossword.variables:\n # temp_set_2.add(pair)\n # # check the is the two sets are equal\n # if temp_set_2 == temp_set_1:\n # consistent = False\n # return consistent\n\n for x in assignment:\n word1 = assignment[x]\n if x.length != len(word1):\n return False\n\n for y in assignment:\n word2 = assignment[y]\n if x != y:\n if word1 == word2:\n return False\n\n overlap = self.crossword.overlaps[x, y]\n if overlap:\n a, b = overlap\n if word1[a] != word2[b]:\n return False\n return True", "def assignment_complete(self, assignment):\n # for word in self.crossword.variables:\n # if word not in assignment.keys() and assignment[word] not in self.crossword.words:\n # return False\n # return True \n\n for variable in self.crossword.variables:\n if variable not in assignment.keys():\n return False\n if assignment[variable] not in self.crossword.words:\n return False\n return True", "def assignment_complete(self, assignment):\n for v in self.crossword.variables:\n if v not in assignment.keys():\n return False\n if assignment[v] not in self.crossword.words:\n return False\n \n return True", "def assignment_ok(assignment, total_decodings):\n for codeword, decode in assignment.iteritems():\n neighbor_decodes = set()\n empty_neighbor_ct = 0\n for neighbor in adjacents(codeword):\n if neighbor in assignment:\n if assignment[neighbor] != decode:\n neighbor_decodes.add(assignment[neighbor])\n else:\n empty_neighbor_ct += 1\n if len(neighbor_decodes) + empty_neighbor_ct < total_decodes:\n return False\n return True", "def assignment_complete(self, assignment):\n for var in self.crossword.variables:\n if var in assignment:\n continue\n else:\n return False\n\n return True", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def isCorrect(self):\n req = Int2Bin(Bin2Int(self.a.assignment) * Bin2Int(self.b.assignment), self.res.width)[-self.res.width:]\n logger.debug(\"a: \"+str(self.a.assignment))\n logger.debug(\"b: \"+str(self.b.assignment))\n logger.debug(\"res: \"+str(self.res.assignment))\n logger.debug(\"req: \"+str(req))\n return req == self.res.assignment", "def is_complete(self, assignment):\n\n complete = True\n for i in assignment:\n if len(assignment[i]) != 1:\n return False\n return complete", "def assignment_complete(self, assignment):\n for x in assignment:\n if assignment[x]==None:\n return False\n return True", "def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise 
NotImplementedError", "def consistent(self, variable, assignment):\n for constraint in self.constraints[variable]:\n if not constraint.satisfied(assignment):\n return False\n return True", "def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))", "def are_compatible(assignments: t.Iterable[Assignment]) -> bool:\n groups: t.DefaultDict[int, t.Set[expressions.Expression]] = collections.defaultdict(\n set\n )\n for assignment in assignments:\n target = assignment.target\n if target in groups[assignment.index]:\n return False\n groups[assignment.index].add(target)\n return True", "def _check_if_middle_part_chars_equal(\n self, original_word: str, permuted_word: str\n ) -> bool:\n return sorted(original_word[1:-1]) == sorted(permuted_word[1:-1])", "def backtrack(self, assignment):\n\n # If the assignment is complete and it has a value for every variable in the crossword puzzle and every value is consistent\n if self.assignment_complete(assignment) and len(assignment) == len(self.crossword.variables) and self.consistent(assignment):\n return assignment\n \n # Select a variable that has not been assigned a value\n var = self.select_unassigned_variable(assignment)\n\n # Iterate through the ordered domain values of the variable to test each one\n for value in self.order_domain_values(var, assignment):\n\n # Join var mapped to its value and the assignment to see if it is consistent\n temp = {var:value}\n temp1 = assignment\n\n # If the value has not already been used and it is consistent with the rest of the assignment\n if value not in assignment.values() and self.consistent(merge(temp, temp1)):\n\n # Actually add the variable and its value to the assignment\n assignment[var] = value\n\n # Continue to backtrack until every variable has an assignment\n result = self.backtrack(assignment)\n\n # If the assignment is complete and if it is consistent, we have found the answer\n if self.assignment_complete(result):\n if self.consistent(result):\n return result\n\n # Otherwise, we picked a wrong value somewhere, so delete the latest value of the variable to try another\n del assignment[var]\n\n # If we can never find a result to return, the problem has no solution\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Using Backtracking Search, take as input a partial assignment for the crossword and return a complete assignment if possible to do so. `assignment` is a mapping from variables (keys) to words (values). If no assignment is possible, return None.
def backtrack(self, assignment):
    # print("Entered backtrack Function")

    # Check if assignment is complete
    if len(assignment) == len(self.domains):
        return assignment

    # Try a new variable
    var = self.select_unassigned_variable(assignment)
    word_list = self.order_domain_values(var, assignment)
    for word in word_list:
        new_assignment = assignment.copy()
        new_assignment[var] = word[0]
        if self.consistent(new_assignment):
            result = self.backtrack(new_assignment)
            if result is not None:
                return result
    return None

    # raise NotImplementedError
[ "def backtrack(self, assignment):\n # Checks if the assignment is complete\n if self.assignment_complete(assignment):\n return assignment\n\n # Select an unassigned variable\n var = self.select_unassigned_variable(assignment)\n\n # Loop through all the words in the domain of the variable selected\n for word in self.order_domain_values(var, assignment):\n\n # Create a copy of the assignment dictionary to avoid alterations\n new_assignment = assignment.copy()\n\n # Add the new assigment to the dictionary copied\n new_assignment[var] = word\n\n # If the assignment is consistent add the assignment to the,\n # original dictionary, inforce arcconsistency and calls backtrack\n if self.consistent(new_assignment):\n assignment[var] = word\n arcs = [(x, var) for x in self.crossword.neighbors(var)]\n self.ac3(arcs)\n result = self.backtrack(assignment)\n if result != None:\n return result\n\n # Delete the assignment if not consistent assignment\n delete = assignment.pop(var, None)\n\n return None", "def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None", "def backtrack(self, assignment):\n\n # If the assignment is complete and it has a value for every variable in the crossword puzzle and every value is consistent\n if self.assignment_complete(assignment) and len(assignment) == len(self.crossword.variables) and self.consistent(assignment):\n return assignment\n \n # Select a variable that has not been assigned a value\n var = self.select_unassigned_variable(assignment)\n\n # Iterate through the ordered domain values of the variable to test each one\n for value in self.order_domain_values(var, assignment):\n\n # Join var mapped to its value and the assignment to see if it is consistent\n temp = {var:value}\n temp1 = assignment\n\n # If the value has not already been used and it is consistent with the rest of the assignment\n if value not in assignment.values() and self.consistent(merge(temp, temp1)):\n\n # Actually add the variable and its value to the assignment\n assignment[var] = value\n\n # Continue to backtrack until every variable has an assignment\n result = self.backtrack(assignment)\n\n # If the assignment is complete and if it is consistent, we have found the answer\n if self.assignment_complete(result):\n if self.consistent(result):\n return result\n\n # Otherwise, we picked a wrong value somewhere, so delete the latest value of the variable to try another\n del assignment[var]\n\n # If we can never find a result to return, the problem has no solution\n return None", "def backtrack(self, 
assignment):\n if self.assignment_complete(assignment):\n return assignment\n var = self.select_unassigned_variable(assignment)\n for val in self.order_domain_values(var, assignment):\n new_assigment = assignment.copy()\n new_assigment[var] = val\n if self.consistent(new_assigment):\n result = self.backtrack(new_assigment)\n if result:\n return result\n return None", "def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False", "def backtrack(self, assignment):\n\n # backtrack has been called, increase global counter by 1\n self.backtrack_count += 1\n\n # if assignment is complete then return assignment\n values = list(filter(lambda node: len(node[1]) > 1, assignment.items()))\n if len(values) == 0:\n return assignment\n\n # define the next values we are going to check (var is actually the key pointing to desired value)\n var = self.select_unassigned_variable(assignment)\n\n # loop over domain of var.\n for value in assignment[var]:\n # we must make a copy here before we do anything\n copy_assignment = copy.deepcopy(assignment)\n\n copy_assignment[var] = [value]\n\n # if changed value inference over all neighbours removing values from domain\n # returns false if an inconsistency is found and true otherwise\n inference = self.inference(copy_assignment, self.get_all_neighboring_arcs(var))\n\n if inference is True:\n # we found no inconsistencies, we can call backtrack with the updated domain\n # backtrack returns true\n result = self.backtrack(copy_assignment)\n\n # Return true if assignment complete\n if result:\n return result\n else:\n # we tried a path that resulted in an inconsistency, fail\n self.fail_count += 1\n\n # No solution found\n return False", "def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in 
range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]", "def select_unassigned_variable(self, assignment):\n \n #list of frequency of neighbours\n neigh_freq = []\n for comp, overlay in self.crossword.overlaps.items():\n if overlay != None:\n neigh_freq.append([comp[0]]) #no need to double count these\n\n #create a list of tuple with the properties of each variable in the domain.\n #tuple: (variable, remaining values in its domain, number of neighbours/degree)\n ranking = []\n for v, value in self.domains.items():\n if v not in assignment: # or do they mean that v exists in assignment, but doesnt have a value?\n ranking.append(tuple((v , len(value), neigh_freq.count([v]))))\n ranking = sorted(ranking, key=lambda x: (x[1], -x[2])) #the \"-\"\" for the sorting criteria under lambda x makes reverse sort for that criteria.\n\n return ranking[0][0] #the first variable should have the smallest number of values and the highest number of neighbours", "def select_unassigned_variable(self, assignment):\n list_of_variables = []\n for var in self.crossword.variables:\n if var not in assignment:\n list_of_variables.append([var, len(self.domains[var]), len(self.crossword.neighbors(var))])\n\n if list_of_variables:\n list_of_variables.sort(key=lambda x: (x[1], -x[2]))\n return list_of_variables[0][0]\n return None", "def satisfying_assignment(formula, assignment=None):\n if not assignment:\n assignment = {}\n\n # loops through formula looking for unit clauses and propagates their consequences through the \n # formula until aren't anymore unit clauses in the formula\n i = 0\n while i < len(formula):\n if len(formula[i]) == 1:\n literal = formula[i][0]\n assignment[literal[0]] = literal[1]\n formula = update_formula(formula, { literal[0]: literal[1] })\n if formula == True:\n return assignment\n elif formula == False:\n return None\n i = 0\n else:\n i += 1\n\n # update formula with current assignment's\n formula = update_formula(formula, assignment)\n if formula == True: \n # if the formula has been simplfied to True, then return the assignment that got it there\n return assignment\n elif formula == False:\n # if the formula has been simplified to False, then return None since no assignment exists that can make it True\n return None\n\n # grab the first variable in the formula\n variable = formula[0][0][0]\n # try assigning it to be True\n option_1 = { key:value for (key,value) in assignment.items() }\n option_1[variable] = True\n # see if there is a set of assignments for option_1\n search_1 = satisfying_assignment(formula, option_1)\n if search_1:\n return search_1\n\n # try assigning it to be False\n option_2 = { key:value for (key,value) in assignment.items() }\n option_2[variable] = False\n # see if there is a set of assignments for option_2\n search_2 = satisfying_assignment(formula, option_2)\n if search_2:\n return search_2", "def backtrack(self, assignment):\n\n # Add 1 to the backtrack_counter every time the function backtrack is called\n self.backtrack_counter += 1\n\n # If the assignment is complete, return the assignment\n if self.is_complete(assignment):\n return assignment\n\n # If not complete, select the next unassigned variable\n variable = self.select_unassigned_variable(assignment)\n\n # For every value in the domain of the unassigned variable, run the AC-3 algorithm\n for value in 
self.order_domain_values(variable, assignment):\n # Make sure every iteration of the foor-loop has a clean slate\n assignment_copy = copy.deepcopy(assignment)\n\n # Check if the value is consistent with assignment (not necessary, value is taken from assignment)\n if value in assignment_copy[variable]:\n # add {var = value} to assignment\n assignment_copy[variable] = [value]\n\n # Run the AC-3 algorithm to check if the variable is arc-consistent\n inferences = self.inference(assignment_copy,self.get_all_neighboring_arcs(variable))\n\n # If arc-consistent, i.e. no inconsistency is found, call backtrack recursively\n if inferences:\n result = self.backtrack(assignment_copy)\n if result:\n return result\n\n # Count every time the function fails\n self.fail_counter += 1\n return False", "def assignment_complete(self, assignment):\n # for word in self.crossword.variables:\n # if word not in assignment.keys() and assignment[word] not in self.crossword.words:\n # return False\n # return True \n\n for variable in self.crossword.variables:\n if variable not in assignment.keys():\n return False\n if assignment[variable] not in self.crossword.words:\n return False\n return True", "def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True", "def select_unassigned_variable(self, assignment):\n\n # filter out every key, value mapping where length of value is only 1\n # (only 1 legal value in domain mapped by key)\n assignable_pairs = dict(filter(lambda pair: len(pair[1]) > 1, assignment.items()))\n # map the lists in values to the length of the lists\n length_of_values = list(map(lambda values: len(values), assignable_pairs.values()))\n # the lowest length, the key who's value has this length is the most suitable to check next (should be returned)\n lowest_length = min(length_of_values)\n\n # check all unassigned variables, the case length = 1 is already filtered out\n for key, value in assignable_pairs.items():\n if len(value) == lowest_length:\n return key\n\n \"\"\"\n # alternative assignment of variable, just returns any valid unassigned variable:\n for key, value in assignment.items():\n if len(value) > 1:\n return key\n \"\"\"", "def backtrack(assignment, variables, domains, constrains, inf_type=None):\n # If assignment is complete\n if len(assignment) == len(variables):\n return assignment\n # Select unassigned variable\n var = 
select_unassigned_variable(variables, domains, constrains, assignment)\n for value in order_domain_values(var, assignment, variables, domains, constrains):\n inference_assignments = None\n # If value is consistent with assignment\n if is_consistent(var, value, assignment, constrains):\n # Add new assignment\n assignment[var] = value\n # Make a copy of domains, in case the branch is backtracked\n _domain = domains.copy()\n # Perform inference (Forward Checking or MAC)\n inference_assignments = inference(var, value, variables, _domain, constrains, assignment, inf_type = inf_type)\n if inference_assignments != \"Failure\":\n # Add inference to assignment\n for v in inference_assignments:\n assignment[v] = inference_assignments[v]\n # Recursive call for next layer in the backtrack search tree\n result = backtrack(assignment, variables, domains, constrains)\n if result != \"Failure\":\n return result\n return \"Failure\"", "def satisfying_assignment(formula):\n if len(formula) == 0:\n return {}\n solution = find_solution(formula)\n if solution != {}:\n return solution\n return None", "def satisfying_assignment(formula):\n # convert the formula to a list of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None", "def select_unassigned_variable(self, assignment):\n #Start by making counters for choices and nodes\n fewest_choices = None\n most_arcs = 0\n #Go through all variables\n for var in self.domains.keys():\n #If variable is already assigned, skip. otherwise carry on\n if var in assignment.keys() and assignment[var] != None:\n continue\n #get values for how many choices and how many connected nodes\n #for this particular variable\n choices = len(self.domains[var])\n arcs = len(self.crossword.neighbors(var))\n #Check if this node has fewer choices than the current fewest\n if fewest_choices == None or fewest_choices > choices:\n fewest_choices = choices\n most_arcs = arcs\n variable = var\n\n #if number of choices is equal, compare arcs\n elif fewest_choices == choices:\n if most_arcs < arcs:\n most_arcs = arcs\n variable = var\n\n #After checking all unassigned variables, return the current best variable\n return variable", "def select_unassigned_variable(self, assignment):\n # Simply just pick the next value that has more than one value\n # in the variable list\n for key, value in assignment.iteritems():\n if len(value) > 1:\n return key, value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws text onto a given surface.
def draw_text(self, text, font, color, surface, x, y):  #use for narrative in end sequence
    text_obj = font.render(text, True, color)
    text_rect = text_obj.get_rect()
    text_rect.center = (x, y)
    surface.blit(text_obj, text_rect)
[ "def render_text_on_surface(text, surface, font, color=BLACK, top_padding=0, left_pading=0):\n rect = surface.get_rect()\n \n last_top = rect.top + top_padding\n for index, line in enumerate(text.split(\"\\n\")):\n text_surf = font.render(line, True, color)\n text_rect = text_surf.get_rect()\n text_rect.topleft = (rect.left + left_pading, last_top)\n surface.blit(text_surf, text_rect)\n \n last_top += text_rect.h", "def draw_text(surf, text, fontsize, x, y, font=FONT):\n font = pygame.font.SysFont(font, fontsize)\n textobj = font.render(text, 1, WHITE)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surf.blit(textobj, textrect)", "def draw_texts(self, text, size, x, y):\n font = pygame.font.Font(self.font_type, size)\n text_surf = font.render(text, True, self.green)\n text_rect = text_surf.get_rect()\n text_rect.center = (x, y)\n self.screen.blit(text_surf, text_rect)", "def render_surface(self, text: str) -> pygame.Surface:\n text_lines = text.split('`')\n\n text_surfaces = []\n\n for line in text_lines:\n text_surfaces.append(self.font.render(line, True, self.text_color))\n\n total_height = 0\n max_width = 0\n\n for ts in text_surfaces:\n size = ts.get_rect()\n\n total_height += size.height\n\n if size.width > max_width:\n max_width = size.width\n\n total_height += (len(text_surfaces) - 1) * self.line_spacing\n\n text_surface = pygame.Surface((max_width, total_height))\n\n y = 0\n\n for ts in text_surfaces:\n line_rect = ts.get_rect()\n\n line_rect.center = (max_width // 2, line_rect.height // 2)\n\n text_surface.blit(ts, ((max_width - line_rect.width) // 2, y))\n\n y = y + line_rect.height + self.line_spacing\n\n return text_surface", "def draw(self, text, color, cord):\n self.txt_obj = self.font_obj.render(text, True, color)\n self.rect = self.txt_obj.get_rect()\n self.rect.x = cord[0]\n self.rect.y = cord[1]\n self.surface.blit(self.txt_obj, self.rect)", "def draw_text(self, display, text, size, x, y , mode):\n font = pygame.font.Font(self.font_name, size)\n text_surface = font.render(text, True, (0,0,0))\n text_rect = text_surface.get_rect()\n if mode == \"left\":\n text_rect.topleft = (x,y)\n elif mode == \"center\":\n text_rect.center = (x,y)\n display.blit(text_surface, text_rect)", "def draw_text(self, surface, text, color, rect, font, align_top=True):\n if isinstance(text, str):\n text = text.decode('utf-8')\n rect = pygame.Rect(rect)\n line_height = font.get_linesize()\n y = rect.top\n msgs = self.wrap_text(text, rect.width, font)\n max_msgs = rect.height/line_height\n\n if align_top:\n msgs = msgs[:max_msgs]\n elif len(msgs) >= max_msgs:\n msgs = msgs[len(msgs)-max_msgs:]\n\n for s in msgs:\n image = font.render(s, True, color)\n surface.blit(image, (rect.left, y))\n y += line_height\n\n return text", "def text_draw(self, x, y, text, style={}):", "def draw_text(self, i, j, text, col, bg=None):\n txt = self.font.render(text, True, col, bg)\n rect = txt.get_rect()\n rect.center = self.get_rect(i, j).center\n self.screen.blit(txt, rect)", "def render(self, surface):\n GameEntity.renderText(self, surface)", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), 
color=color, size=size)", "def draw_text(SCREEN, text, x, y):\n text = constants.CALIBRI_25.render(text, True, constants.BLACK)\n SCREEN.blit(text, (x, y))", "def draw_text(text: str, surface: Surface, rect: Rect, font: Font, color: Color, line_spacing: int = -2, center: bool = True) -> list:\n\n font_height = font.size(\"Tg\")[1]\n if not isinstance(text, list):\n text = wrap_text(text, font, rect.width)\n\n printable_lines = 1\n for i in range(1, len(text)):\n if ((font_height + line_spacing) * (i + 1)) <= rect.height:\n printable_lines += 1\n\n y = rect.top\n if center:\n y = (rect.height / 2) - (((font_height + line_spacing) * printable_lines) / 2)\n\n for line in text[:printable_lines]:\n # render the line\n image = font.render(line, True, color)\n\n x = rect.left\n if center:\n x = (rect.width / 2) - (image.get_width() / 2)\n\n # blit the line\n surface.blit(image, (x, y))\n y += font_height + line_spacing\n\n return text[printable_lines:]", "def display(self,surface):\n\n if self._shown:\n \"\"\"the 0,0 values mean the upper left corner of the surface\"\"\"\n self._image.display(0,0,self._surf)\n self._surf.blit(self._textsurf, self._textrect)\n surface.blit(self._surf, self._rect)\n self.displaySubwidgets(surface)", "def text(x: int, y: int, size: int = 1, text_str: str = \"\", color: str = \"\"):\n renderer.draw_string_2d(x, y, size, size, text_str, get_color(color))", "def draw(self, text=None):\n self.screen.fill((255, 255, 255))\n current = 0\n for s in self.grid.squares:\n current = current + 1\n if s.pressed == False:\n pygame.draw.rect(self.screen, s.color, s.rect)\n render = self.font.render(str(current), 0, (0, 0, 0))\n else:\n pygame.draw.rect(self.screen, (0, 0, 0), s.rect)\n render = self.font.render(str(current), 0, (255, 255, 255))\n self.screen.blit(render, (s.rect.centerx - 50, s.rect.centery - 50))\n\n if text:\n self.render_overlay(text)\n pygame.display.flip()", "def display_text(self):\n w, h = self.title_surface.get_size()\n text_x = (self.display_width - w)//2\n text_y = self.display_height//20\n self.display_surface.blit(self.title_surface, (text_x, text_y - 10))\n if self.instruction_active:\n temp_w, temp_h = self.instruction_surface.get_size()\n temp_x = (self.display_width - temp_w)//2\n temp_y = int(self.die_y - temp_h * 1.15)\n self.display_surface.blit(self.instruction_surface, (temp_x, temp_y + 250))", "def render_text_surfaces(self):\n self.images = [] # The text surfaces.\n line_width = 0\n line = []\n space_width = self.font.size(' ')[0]\n\n # Put the words one after the other into a list if they still\n # fit on the same line, otherwise render the line and append\n # the resulting surface to the self.images list.\n for word in self.text:\n line_width += self.font.size(word)[0] + space_width\n # Render a line if the line width is greater than the rect width.\n if line_width > self.rect.w:\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)\n line = []\n line_width = self.font.size(word)[0] + space_width\n\n line.append(word)\n\n # Need to render the last line as well.\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split the file and save chunks to separate files
def split(self):
    print 'Splitting file', self.__filename
    print 'Number of chunks', self.__numchunks, '\n'

    try:
        f = open(self.__filename, 'rb')
    except (OSError, IOError), e:
        raise FileSplitterException, str(e)

    bname = (os.path.split(self.__filename))[1]
    # Get the file size
    fsize = os.path.getsize(self.__filename)
    # Get size of each chunk
    self.__chunksize = int(float(fsize)/float(self.__numchunks))

    chunksz = self.__chunksize
    total_bytes = 0

    for x in range(self.__numchunks):
        chunkfilename = bname + '-' + str(x+1) + self.__postfix

        # if reading the last section, calculate correct
        # chunk size.
        if x == self.__numchunks - 1:
            chunksz = fsize - total_bytes

        try:
            print 'Writing file',chunkfilename
            data = f.read(chunksz)
            total_bytes += len(data)
            chunkf = file(chunkfilename, 'wb')
            chunkf.write(data)
            chunkf.close()
        except (OSError, IOError), e:
            print e
            continue
        except EOFError, e:
            print e
            break

    print 'Done.'
[ "def split_file(self, number_of_splits):\n file_size = os.path.getsize(self.input_file_path)\n unit_size = file_size / number_of_splits + 1\n original_file = open(self.input_file_path, \"r\")\n file_content = original_file.read()\n original_file.close()\n (index, current_split_index) = (1, 1)\n current_split_unit = self.initiate_file_split(current_split_index, index)\n for character in file_content:\n current_split_unit.write(character)\n if self.is_on_split_position(character, index, unit_size, current_split_index):\n current_split_unit.close()\n current_split_index += 1\n current_split_unit = self.initiate_file_split(current_split_index, index)\n index += 1\n current_split_unit.close()", "def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size", "def write_chunks(self, file):\n\n for c in self.chunks:\n\n self.chunk(file, c[0], c[1])", "def write_chunks(file, chunks):\n\n\tfor c in chunks:\n\n\t\tchunk(file, c[0], c[1])", "def split_file(filename, split_num):\n root, ext = os.path.splitext(filename)\n with open(filename) as f:\n lines = f.readlines()\n total_line = len(lines)\n\n print lines[0].split('\\t')\n\n size = total_line / split_num\n\n print 'Total line: %d, splited file line number: %d' % (total_line, size)\n\n total_line - size * split_num\n for i in range(0, split_num):\n split_file = root + '_' + str(i+1) + ext\n\n start = i * size;\n end = (i+1) * size;\n if i == split_num - 1:\n end = total_line\n\n print 'splite file %s: line from %d to %d' % (split_file, start, end)\n\n with open(split_file, 'w') as fw:\n for j in range(start, end):\n fw.write('%s' % lines[j])", "def split_file_output(name, data, out_dir, max_lines=1100,\n buffering=FILE_BUFFER_SIZE):\n batches = i_batch(max_lines, data)\n\n if is_py3():\n join_str = b''\n else:\n join_str = ''\n\n index = count()\n for group in batches:\n file_path = os.path.join(out_dir,\n \"{0}_{1}\".format(next(index), name))\n with open(file_path, 'wb', buffering=buffering) as shard_file:\n shard_file.write(join_str.join(group))", "def split_single_file(self, filename):\n file_size = os.path.getsize(filename)\n chunk_size = (file_size + self.worker_num - 1) / self.worker_num\n file_handler = open(filename, \"r\")\n chunks = []\n pos = 0\n while pos < file_size:\n next_pos = min(pos + chunk_size, file_size)\n if pos == 0:\n chunks.append((filename, pos, self.find_next_newline(file_handler, next_pos)))\n else:\n chunks.append((filename, self.find_next_newline(file_handler, pos), self.find_next_newline(file_handler, next_pos)))\n pos = next_pos\n file_handler.close()\n return chunks", "def combine(self):\n\n import re\n \n print 'Creating file', self.__filename\n \n bname = (os.path.split(self.__filename))[1]\n bname2 = bname\n \n # bugfix: if file contains characters like +,.,[]\n # properly escape them, otherwise re will fail to match.\n for a, b in zip(['+', '.', '[', ']','$', '(', ')'],\n 
['\\+','\\.','\\[','\\]','\\$', '\\(', '\\)']):\n bname2 = bname2.replace(a, b)\n \n chunkre = re.compile(bname2 + '-' + '[0-9]+')\n \n chunkfiles = []\n for f in os.listdir(\".\"):\n print f\n if chunkre.match(f):\n chunkfiles.append(f)\n\n\n print 'Number of chunks', len(chunkfiles), '\\n'\n chunkfiles.sort(self.sort_index)\n\n data=''\n for f in chunkfiles:\n\n try:\n print 'Appending chunk', os.path.join(\".\", f)\n data += open(f, 'rb').read()\n except (OSError, IOError, EOFError), e:\n print e\n continue\n\n try:\n f = open(bname, 'wb')\n f.write(data)\n f.close()\n except (OSError, IOError, EOFError), e:\n raise FileSplitterException, str(e)\n\n print 'Wrote file', bname", "def split(self):\n if(self.back == 'y'):\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1\n backNames = [self.file_path + str(num) + 'b' for num in range(len(files))]\n for num,file in enumerate(files):\n open(backNames[num],'w').write(file)\n else:\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1", "def write_chunks(file_path, chunk_iterator):\n with open(file_path, 'wb') as f:\n for chunk in chunk_iterator:\n f.write(chunk.data)", "def split_audio(path_to_file, output_dir, chunk_size_in_bytes=10 * 1024 * 1024):\n try:\n os.makedirs(output_dir, exist_ok=True)\n title = Path(path_to_file).stem\n audio = AudioSegment.from_file(path_to_file, \"mp3\")\n except Exception as e:\n logger.error(f\"An error occurred while reading the audio file: {e}\")\n return\n\n # Estimate the average bitrate of the audio file\n file_size_in_bytes = os.path.getsize(path_to_file)\n duration_in_milliseconds = len(audio)\n estimated_bitrate = 8 * file_size_in_bytes / duration_in_milliseconds\n\n # Calculate the approximate duration of each chunk\n chunk_duration_in_milliseconds = 8 * chunk_size_in_bytes / estimated_bitrate\n\n num_chunks = math.ceil(duration_in_milliseconds / chunk_duration_in_milliseconds)\n logger.info(f\"Splitting {title} into {num_chunks} segments.\")\n\n for i in tqdm(range(num_chunks)):\n start = int(i * chunk_duration_in_milliseconds)\n end = int((i + 1) * chunk_duration_in_milliseconds)\n segment = audio[start:end]\n segment.export(\n Path(output_dir) / title / f\"{title}_prepared_{i}.mp3\", format=\"mp3\"\n )\n\n # return the path to the first chunk\n return Path(output_dir) / title / f\"{title}_prepared_0.mp3\"", "def splits():\n\n print(\"\\n ----------Splitting compressed file to many files.....\")\n\n # No. 
of files = no.of distinct crimes = length of ls_type list\n save_path = 'split_files/' ## creating split files in another folder to avoid clutter\n ls_fname = [os.path.join(save_path)+str(i)+'.csv' for i in range(0,len_ls_type)]\n\n # Create file handlers to all files and keep them open\n ls_fhandler = [open(fname,'a') for fname in ls_fname ]\n\n count = 0\n\n # iterate through compressed crimes and seperate lines into individual files based on crime type\n with open('crimes_compressed.csv') as fh:\n for line in fh:\n line = line.replace('\\n','') ## replace new line char with empty\n ls_line = line.split(',') ## split line into a list\n fh_index = int(ls_line[2]) ## since same encoding rule is enforced.\n print(line,file = ls_fhandler[fh_index]) ## write line to designated file\n count += 1\n print(\"\\r Finished: \".format(count)+str(count),end = '')\n\n #close all open file handlers\n for fh in ls_fhandler:\n fh.close()\n\n print(\"\\n ----------Splitting finished. {0} seperate files created\".format(len(ls_fname)))", "def gen_file_chunks(self, file_pointer, chunk_size):\n chunks = self.gen_chunks(file_pointer, chunk_size)\n for chunk in chunks:\n subsets = tuple(self.gen_slices(chunk))\n yield self.Chunk(chunk, subsets)", "def splitter(file_name: str, MAX_SIZE: int = 7):\n\n # convertion to MB\n MAX_SIZE = MAX_SIZE * 1024 * 1024\n\n # index go throught the bit stream\n start_index: int = 0\n\n # harvested data\n data: bytes = None\n\n created_files: int = 0\n\n with open(file_name, \"rb\") as input_stream:\n # while we didn't go out the file\n while data != b'':\n # we place the cursor at start index\n input_stream.seek(start_index)\n # read a chunk of size MAX_SIZE bytes\n data = input_stream.read(MAX_SIZE)\n\n if data == b'':\n break\n # then we open an output file\n with open(str(start_index) + \"_\" + file_name, \"wb\") as ouput_stream:\n # A write the related chunk in it\n ouput_stream.write(data)\n\n created_files += 1\n\n # we translate the cursor\n start_index += MAX_SIZE\n\n print(\"Done! 
\", created_files, \" files created\")", "def initiate_file_split(self, split_index, index):\n file_split = open(settings.get_input_split_file(split_index-1), \"w+\")\n file_split.write(str(index) + \"\\n\")\n return file_split", "def split_file(self):\n # process lines into blocks with Parser until EOF triggers StopIteration\n while self.maf_lines:\n try:\n # rest counters and open new file at the top of the loop AFTER\n # the most recent yield\n if self._stop:\n self._yield(new_file=True)\n # try to get next block from Parser and write to current file\n block_string = self.parser.get_block(self.maf_lines).next()\n self.current_file.write(block_string)\n # update char count for the current file\n self.char_count += len(block_string)\n # if char count crosses limit, yield current file name start new file\n if self._stop:\n yield self.current_filename\n\n except StopIteration:\n self._yield(new_file=False)\n yield self.current_filename", "def split_file(self, input_file, buffer=1024) -> str:\n file_size = os.stat(input_file).st_size\n with create_pg(total=file_size, leave=False, unit='B', unit_scale=True, unit_divisor=1024,\n desc='Splitting file') as t:\n\n with open(input_file, 'rb') as src:\n while True:\n with tempfile.NamedTemporaryFile() as f:\n with open(f.name, 'wb') as dest:\n written = 0\n while written < self.max_size:\n data = src.read(buffer)\n if data:\n dest.write(data)\n written += buffer\n t.update(len(data))\n else:\n if written == 0:\n return # file has ended on split size - don't yield\n\n break\n\n yield f.name", "def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 'a')\n print('NOTE: appending to map files, not overwriting. 
may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()", "def split_sd_file(input_filepath, filename, num_in_file):\n logger = logging.getLogger(__name__)\n number = 0\n file_count = 0\n prefix, ext = os.path.splitext(filename)\n prefix, ext2 = os.path.splitext(prefix)\n with gzip.open(os.path.join(input_filepath, filename)) as gzinfile:\n lines = []\n for line in gzinfile:\n lines.append(line)\n if line == b'$$$$\\r\\n':\n number += 1\n if number == num_in_file:\n with gzip.open(os.path.join(input_filepath, prefix + \".\" + str(file_count) + ext2 + ext), \"wb\") as fi:\n logger.info(f\"Gzipping fragment {file_count}\")\n fi.writelines(lines)\n lines = []\n number = 0\n file_count += 1\n # Now write out the final leftovers\n with gzip.open(os.path.join(input_filepath, prefix + \".\" + str(file_count) + ext2 + ext), \"wb\") as fi:\n logger.info(f\"Gzipping fragment {file_count}\")\n fi.writelines(lines)\n\n return file_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine existing chunks to recreate the file. The chunks must be present in the cwd. The new file will be written to cwd.
def combine(self):

    import re

    print 'Creating file', self.__filename

    bname = (os.path.split(self.__filename))[1]
    bname2 = bname

    # bugfix: if file contains characters like +,.,[]
    # properly escape them, otherwise re will fail to match.
    for a, b in zip(['+', '.', '[', ']','$', '(', ')'],
                    ['\+','\.','\[','\]','\$', '\(', '\)']):
        bname2 = bname2.replace(a, b)

    chunkre = re.compile(bname2 + '-' + '[0-9]+')

    chunkfiles = []
    for f in os.listdir("."):
        print f
        if chunkre.match(f):
            chunkfiles.append(f)

    print 'Number of chunks', len(chunkfiles), '\n'
    chunkfiles.sort(self.sort_index)

    data=''
    for f in chunkfiles:
        try:
            print 'Appending chunk', os.path.join(".", f)
            data += open(f, 'rb').read()
        except (OSError, IOError, EOFError), e:
            print e
            continue

    try:
        f = open(bname, 'wb')
        f.write(data)
        f.close()
    except (OSError, IOError, EOFError), e:
        raise FileSplitterException, str(e)

    print 'Wrote file', bname
[ "def combine_chunks(total_parts, total_size, source_folder, dest):\n\n if not os.path.exists(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n with open(dest, 'wb+') as destination:\n for i in range(total_parts):\n part = os.path.join(source_folder, str(i))\n with open(part, 'rb') as source:\n destination.write(source.read())", "def combine_chunks(total_parts, total_size, source_folder, dest):\n\n if not os.path.exists(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n\n with open(dest, 'wb+') as destination:\n for i in range(int(total_parts)):\n part = os.path.join(source_folder, str(i))\n with open(part, 'rb') as source:\n destination.write(source.read())", "def join_file(rel_file_path, temp_file_paths):\n chunks = {}\n for temp_file in temp_file_paths:\n index, file_chunk = retrieve_metatags(temp_file)\n chunks[index] = file_chunk\n remove(temp_file)\n temp_file_path = cumulusStore.directory+rel_file_path[:-1]+\".cumuluswap\"\n\n temp_file = open(temp_file_path, 'w')\n for i in range(0, len(chunks)):\n temp_file.write(decrypt(chunks[i]))\n return get_abs_path(rel_file_path), temp_file_path", "def combine_dirname_chunks(dirname, dest_name, dest_dirname=None, subdir=None, file_pat=r'chunk_\\d+\\.csv'):\n \n if dest_dirname is None: dest_dirname = dirname\n \n if not args.argsdict['use_s3_results']:\n # merge locally\n utils.merge_csv_dirname_local(dirname=dirname, dest_name=dest_name, file_pat=file_pat)\n else: \n # download all the CSV files\n # make sure tmp is empty.\n tmp_dirpath = DB.dirpath_from_dirname('tmp', s3flag=False)\n shutil.rmtree(tmp_dirpath, ignore_errors=True)\n DB.download_entire_dirname(dirname=dirname, subdir=subdir, file_pat=file_pat, local_dirname='tmp')\n utils.merge_csv_dirname_local(dirname='tmp', subdir=subdir, dest_dirname=dirname, dest_name=dest_name, file_pat=file_pat)\n DB.upload_file_dirname(dirname=dest_dirname, filename=dest_name) # note: no subdir used in combination.", "def merge_files(self):\n print \"Merging chunks...\"\n for i in self.classes:\n req_body = self.cs.prepare_merge(i)[0]\n \n if len(req_body['sourceObjects']) > 1:\n destinationObject = \"{0}_{1}.csv\".format(i, self.table_version)\n resp = self.cs.compose(req_body, destinationObject)\n print 'File {0} created in bucket {1}'.format(resp['name'], resp['bucket'])\n else:\n sourceObject = req_body['sourceObjects'][0]['name']\n destinationObject = \"{0}_{1}.csv\".format(i, self.table_version)\n resp = self.cs.copy_object_within_bucket(sourceObject, destinationObject)\n print 'File {0} created in bucket {1}'.format(resp['name'], resp['bucket'])\n\n return", "def write_chunks(file, chunks):\n\n\tfor c in chunks:\n\n\t\tchunk(file, c[0], c[1])", "def file_combine(self, output_file, tmp_file1, tmp_file2):\n\t\t#use the unix command 'paste' to merge lines of two files\n\t\tjob_fname = 'paste %s %s > %s'%(output_file, tmp_file1, tmp_file2)\n\t\tos.system(job_fname)\t#spawnvp gets dead under MPI\n\t\t#wl = ['sh', '-c', job_fname]\n\t\t#os.spawnvp(os.P_WAIT, 'sh', wl)\n\t\t#overlapping the first file by 'mv'\n\t\tjob_fname = 'mv %s %s'%(tmp_file2, output_file)\n\t\tos.system(job_fname)\n\t\t#wl = ['sh', '-c', job_fname]\n\t\t#os.spawnvp(os.P_WAIT, 'sh', wl)", "def concat_temp_files_into_final_file():\n\n os.system('cat ' + '*' + CLEANED_TEMP_FILE_EXT + ' > ' + FINAL_NEWS_FILE_NAME)\n print 'Final file has been created.'", "def update_file(src, dest, chunk_size=1024):\r\n dest_checksum, hashlib_checksum = file_to_hashlist(dest)\r\n instruction = rolling_checksum(src, 
dest_checksum)\r\n temp_name = \"dest_temp\"\r\n new_fd = os.open(temp_name, os.O_CREAT | os.O_WRONLY)\r\n for data in instruction:\r\n if data in hashlib_checksum.keys():\r\n os.write(new_fd, hashlib_checksum[data])\r\n else:\r\n os.write(new_fd, data)\r\n os.close(new_fd)\r\n os.remove(dest)\r\n os.rename(temp_name, dest)\r\n change_same_permission(src, dest)\r\n change_same_time(src, dest)", "def write_chunks(self, file):\n\n for c in self.chunks:\n\n self.chunk(file, c[0], c[1])", "def rechunk(in_path,\n out_path,\n in_path_in_file,\n out_path_in_file,\n out_chunks,\n n_threads,\n out_blocks=None,\n dtype=None,\n use_zarr_format=None,\n **new_compression):\n f_in = File(in_path)\n # check if the file format was specified\n # if not, keep the format of the input file\n # otherwise set the file format\n is_zarr = f_in.is_zarr if use_zarr_format is None else use_zarr_format\n f_out = File(out_path, use_zarr_format=is_zarr)\n\n # if we don't have out-blocks explitictly given,\n # we iterate over the out chunks\n if out_blocks is None:\n out_blocks = out_chunks\n\n ds_in = f_in[in_path_in_file]\n # if no out dtype was specified, use the original dtype\n if dtype is None:\n dtype = ds_in.dtype\n\n shape = ds_in.shape\n compression_opts = new_compression if new_compression else ds_in.compression_opts\n ds_out = f_out.create_dataset(out_path_in_file,\n dtype=dtype,\n shape=shape,\n chunks=out_chunks,\n **compression_opts)\n\n def write_single_chunk(roi):\n data_in = ds_in[roi].astype(dtype, copy=False)\n if np.sum(data_in) == 0:\n return\n ds_out[roi] = data_in\n\n with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:\n tasks = [tp.submit(write_single_chunk, roi)\n for roi in blocking(shape, out_blocks)]\n [t.result() for t in tasks]\n\n # copy attributes\n in_attrs = ds_in.attrs\n out_attrs = ds_out.attrs\n for key, val in in_attrs.items():\n out_attrs[key] = val", "def _concatenate_inner(self, chunks, direction):\n tmp_bucket = []\n source_chunks = chunks if direction else chunks[::-1]\n target_chunks = ChunkList()\n for chunk in source_chunks:\n if (\n # if the chunk has matched dependency, do concatenation.\n chunk.dependency == direction or\n # if the chunk is SPACE, concatenate to the previous chunk.\n (direction == False and chunk.is_space())\n ):\n tmp_bucket.append(chunk)\n continue\n tmp_bucket.append(chunk)\n if not direction: tmp_bucket = tmp_bucket[::-1]\n new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])\n chunk.update_word(new_word)\n target_chunks.append(chunk)\n tmp_bucket = []\n if tmp_bucket: target_chunks += tmp_bucket\n return target_chunks if direction else target_chunks[::-1]", "def _concatenate_parts_to_file_for_pipe(self,\n outfile,\n image_parts,\n source_dir,\n debug=False):\n close_all_fds([outfile])\n part_count = len(image_parts)\n part_file = None\n try:\n for part in image_parts:\n self.log.debug(\"Concatenating Part:\" + str(part.filename))\n sha1sum = hashlib.sha1()\n part_file_path = source_dir + \"/\" + part.filename\n with open(part_file_path) as part_file:\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n while data:\n sha1sum.update(data)\n outfile.write(data)\n outfile.flush()\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n part_digest = sha1sum.hexdigest()\n self.log.debug(\n \"PART NUMBER:\" + str(image_parts.index(part) + 1) +\n \"/\" + str(part_count))\n self.log.debug('Part sha1sum:' + str(part_digest))\n self.log.debug('Expected sum:' + str(part.hexdigest))\n if part_digest != part.hexdigest:\n raise 
ValueError('Input part file may be corrupt:{0} '\n .format(part.filename),\n '(expected digest: {0}, actual: {1})'\n .format(part.hexdigest, part_digest))\n except IOError as ioe:\n # HACK\n self.log.debug('Error in _concatenate_parts_to_file_for_pipe.' +\n str(ioe))\n if not debug:\n return\n raise ioe\n finally:\n if part_file:\n part_file.close()\n self.log.debug('Concatentate done')\n self.log.debug('Closing write end of pipe after writing')\n outfile.close()", "def append_files(out_file, in_file):\n\n with open(in_file, 'r') as handle, open(out_file, 'a') as out_handle:\n out_handle.write(handle.read())", "def _copy(self):\n for d in self._current_chunk:\n self.out.write(d)", "def merge_chunks(self, data):\r\n fn = \"speech_%s_%s.mp3\" % (\r\n data[\"lang\"], data[\"datetime\"].strftime(\"%Y%m%d-%H%M%S\"))\r\n filename_main = unique_path(fn)\r\n with open(filename_main, \"wb\") as f:\r\n # MP3s can be simply concatenated together, result is legible.\r\n for i, filename in enumerate(data[\"filenames\"]):\r\n f.write(open(filename, \"rb\").read())\r\n # Add more silence for separators like commas and periods.\r\n silence_count = 0\r\n if data[\"chunks\"][i][-1] in [\".\",\"?\",\"!\"]:\r\n silence_count = conf.SilenceCountLong\r\n elif data[\"chunks\"][i][-1] in [\",\",\":\",\";\",\"(\",\")\"]:\r\n silence_count = conf.SilenceCountShort\r\n f.write(base64.decodestring(conf.Silence) * silence_count)\r\n for filename in data[\"filenames\"]:\r\n try:\r\n os.unlink(filename)\r\n except Exception: pass\r\n data.update(filenames=[filename_main], current=filename_main, count=1)", "def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)", "def hadd(new_name, files, chunk_size=900):\n \n if len(files) <= chunk_size:\n return hadd_ex(new_name, files)\n \n files = files[:]\n new_files = []\n while files:\n these = files[:chunk_size]\n files = files[chunk_size:]\n\n this_fn = new_name + '_%i' % len(new_files)\n new_files.append(this_fn)\n\n if not hadd_ex(this_fn, these):\n print '\\033[36;7m PROBLEM hadding \\033[m', new_name, 'in chunks of', chunk_size, 'on', this_fn\n return False\n\n assert len(new_files) < chunk_size\n\n ok = hadd_ex(new_name, new_files)\n if not ok:\n print '\\033[36;7m PROBLEM hadding', new_name, 'in chunks of', chunk_size, 'assembling final file'\n return False\n\n for fn in new_files:\n os.remove(fn)\n\n return True", "def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n 
entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
two BaseWrapper instances are equal enough
def assert_wrappers_equal(first, second):
    assert first.sk_params == second.sk_params
    assert first.history_ == second.history_
    if not first.model_ or not second.model_:
        assert first.model_ == second.model_
    else:
        assert_models_equal(first.model, second.model)
[ "def test_same_id(self):\n b1 = Base(1)\n b2 = Base(1)\n self.assertEqual(b1.id, 1)\n self.assertEqual(b2.id, 1)", "def test_inheritedClassesEquality(self):\n self.assertTrue(Record(1, 2) == DerivedRecord(1, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(1, 3))\n self.assertFalse(Record(1, 2) == DerivedRecord(2, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(3, 4))", "def testEquality(self):\n pass", "def test_inheritedClassesInequality(self):\n self.assertFalse(Record(1, 2) != DerivedRecord(1, 2))\n self.assertTrue(Record(1, 2) != DerivedRecord(1, 3))\n self.assertTrue(Record(1, 2) != DerivedRecord(2, 2))\n self.assertTrue(Record(1, 2) != DerivedRecord(3, 4))", "def assert_compatible(self, other):\n assert self.config == other.config, ('configs are not the same self: %s '\n 'other %s') % (self.config,\n other.config)\n\n assert self.hash_functions == other.hash_functions, (\n 'hash functions are not the same')\n return True", "def test_differentClassesInequality(self):\n self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))", "def test_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __eq__(self, other):\n return isinstance(other, type(self)) and self.size == other.size", "def test_compatible(self, other):\n if not self.center.dims == other.center.dims:\n raise ValueError(\"Devices have different dimensionality: {:d} vs {:d}\".format(self.center.dims, other.center.dims))\n\n if not self.center.shape == other.center.shape:\n raise ValueError(\"The shape of the central part does not match: {} vs {}\".format(self.center.shape, other.center.shape))\n\n if not len(self.leads) == len(other.leads):\n raise ValueError(\"The number of leads is different: {:d} vs {:d}\".format(len(self.leads), len(other.leads)))\n\n for n, (i,j) in enumerate(zip(self.leads, other.leads)):\n if not i.shape == j.shape:\n raise ValueError(\"The shape of a lead {:d} does not match: {} vs {}\".format(n,i.shape,j.shape))\n\n for n, (i,j) in enumerate(zip(self.connections, other.connections)):\n if not numpy.array_equal(i,j):\n raise ValueError(\"The connections arrays for lead {:d} are not equal\".format(n))", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)", "def test__eq__(self, val1, val2, eq):\n class Klass(Immutable):\n __slots__ = ('a',)\n def __init__(self, a):\n super().__init__(a=a)\n\n assert (Klass(val1) == Klass(val2)) == eq", "def __eq__(self, other): \n return type(other) == Triangle and self.base == other.base and self.height == other.height", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def __eq__(self, other):\n return self._proxy == other", "def __eq__(self: BaseModel, other: object) -> bool:\n return isinstance(other, BaseModel) and self.to_dict() == other.to_dict()", "def test_equal(self):\n line_a = Line2d(Point2d(0.0, 2.0), Vector2d(4.0, 0.0))\n line_b = Line2d(Point2d(0.0, 2.0), Vector2d(4.0, 0.0))\n line_c = 
Line2d(Point2d(3.0, 0.0), Vector2d(0.0, 4.0))\n assert_that(line_a, equal_to(line_b))\n assert_that(line_a, is_not(equal_to(line_c)))\n assert_that(line_a.__eq__(1234), equal_to(False))", "def test_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def test_instance(self):\n base = Base()\n self.assertIsInstance(base, Base)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The names of the arguments to the function which are contained in the PyArgKeywords list
def arg_names(self):
    return self._arg_names
[ "def get_func_arg_names(func):\n return get_func_code(func).co_varnames", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def _load_function_call_arguments(self):\n # The lower byte is the number of positional arguments\n number_of_positional = self._bytecode[self._program_counter + 1]\n # The higher byte is the number of keyword arguments\n number_of_keyword = self._bytecode[self._program_counter + 2]\n\n return '{0} positional, {1} keyword pair'.format(\n number_of_positional,\n number_of_keyword\n )", "def arguments(self):\n return [ai for a in self._arguments for ai in a.get_all_function_def_arguments()]", "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def get_arguments(self, name, *args, **kwargs):", "def keywords(self):\n return self._pyfuncitem.keywords", "def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]", "def parameters(self):\r\n pyname = self.pyname\r\n if isinstance(pyname, pynames.ImportedName):\r\n pyname = pyname._get_imported_pyname()\r\n if isinstance(pyname, pynames.DefinedName):\r\n pyobject = pyname.get_object()\r\n if isinstance(pyobject, pyobjects.AbstractFunction):\r\n return pyobject.get_param_names()", "def getPositionalArgs():", "def param_names(self) -> List[str]:", "def get_annotated_keyword_only_arguments(arg_spec : FullArgSpec) -> str:\n if arg_spec.kwonlyargs:\n keyword_only_annotations = \",*\"\n for argument in arg_spec.kwonlyargs:\n annotation = pavo_cristatus_get_argument_annotation(arg_spec, argument)\n if annotation:\n annotation = \" : \" + annotation\n\n if arg_spec.kwonlydefaults is not None:\n keyword_only_annotations += \", {0}{1} = {2}\".format(argument, annotation,\n arg_spec.kwonlydefaults[argument])\n else:\n keyword_only_annotations += \", {0}{1}\".format(argument, annotation)\n return keyword_only_annotations\n else:\n return str()", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def __args(self) -> List[str]:\n if self.__current_method is not None:\n return list(self.__current_method.args.keys())\n else:\n return []", "def get_annotated_keyword_arguments(arg_spec : FullArgSpec) -> str:\n keyword_annotation = str()\n if arg_spec.varkw is not None:\n keyword_annotation += \", **\" + arg_spec.varkw\n annotation = pavo_cristatus_get_argument_annotation(arg_spec, arg_spec.varkw)\n if annotation:\n keyword_annotation += \" : \" + annotation\n return keyword_annotation", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def all_args():\n 
results = []\n\n for name, value in pwndbg.arguments.arguments():\n results.append(\"%4s = %s\" % (name, pwndbg.chain.format(value)))\n\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create FunctionDef responsible for casting python argument to C
def Python_to_C(c_object):
    try:
        cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]
    except KeyError:
        errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype, severity='fatal')
    cast_func = FunctionDef(name = cast_function,
                            body = [],
                            arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],
                            results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])
    return cast_func
[ "def C_to_Python(c_object):\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)],\n results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)])\n\n return cast_func", "def C_to_Python(c_object):\n if c_object.rank != 0:\n cast_function = 'ndarray_to_pyarray'\n memory_handling = 'stack'\n else:\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n memory_handling = 'alias'\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [FunctionDefArgument(c_object.clone('v', is_argument = True, memory_handling=memory_handling))],\n results = [FunctionDefResult(Variable(dtype=PyccelPyObject(), name = 'o', memory_handling='alias'))])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n 
and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n 
C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def _Cfunction(name, flags, errcheck, *types):\n print name , 'xxxx', type\n\t\n if hasattr(dll, name):\n p = ctypes.CFUNCTYPE(*types)\n f = p((name, dll), flags)\n if errcheck is not None:\n f.errcheck = errcheck\n # replace the Python function\n # in this module, but only when\n # running as python -O or -OO\n if __debug__:\n _Cfunctions[name] = f\n else:\n _Globals[name] = f\n return f\n raise NameError('no function %r' % (name,))", "def fromrpython(func):\n p = annlowlevel.llhelper(PTRTYPE, func)\n return RCTypesFunc.fromllptr(p)", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def c_function(self, restype, *argtypes):\n def wrap(f):\n def inner(*args, **kwargs):\n # TODO: apply args and kwargs appropriate to Python\n if not f._c_func:\n f._c_func = f._c_func_proto()\n return f._c_func(*args)\n if PY3:\n name = f.__name__\n varnames = f.__code__.co_varnames\n else:\n name = f.func_name\n varnames = f.func_code.co_varnames\n cargs = zip(varnames, argtypes)\n f._c_decl, f._c_code = self._create_func(name, restype, cargs, f.__doc__)\n f._c_func_proto = lambda: self.state.get_symbol(name,\n ctypes.CFUNCTYPE(restype, *argtypes))\n f._c_func = None\n self.parts.append(f)\n return inner\n return wrap", "def ggml_map_custom1_f32(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def cfunction(self):\n if self._lib is None:\n self._lib = jit_compile_and_load(self.ccode, self.basename,\n self.compiler)\n if self._cfunction is None:\n self._cfunction = getattr(self._lib, self.name)\n self._cfunction.argtypes = self.argtypes\n\n return self._cfunction", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = 
ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def ggml_map_custom2_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def _Cfunction(name, flags, errcheck, *types):\n if hasattr(dll, name) and name in _Globals:\n p = ctypes.CFUNCTYPE(*types)\n f = p((name, dll), flags)\n if errcheck is not None:\n f.errcheck = errcheck\n # replace the Python function\n # in this module, but only when\n # running as python -O or -OO\n if __debug__:\n _Cfunctions[name] = f\n else:\n _Globals[name] = f\n return f\n raise NameError('no function %r' % (name,))", "def _Cfunction(name, flags, errcheck, *types):\r\n if hasattr(dll, name) and name in _Globals:\r\n p = ctypes.CFUNCTYPE(*types)\r\n f = p((name, dll), flags)\r\n if errcheck is not None:\r\n f.errcheck = errcheck\r\n # replace the Python function\r\n # in this module, but only when\r\n # running as python -O or -OO\r\n if __debug__:\r\n _Cfunctions[name] = f\r\n else:\r\n _Globals[name] = f\r\n return f\r\n raise NameError('no function %r' % (name,))", "def ggml_map_binary_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. 
Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if 
meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. { \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def ggml_map_custom3_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, c: ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def python_value(env, value: ffi.CData) -> type:\n return PYTHON_VALUES[value.header.type](env, value)", "def ctypes_type(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create FunctionDef responsible for casting c argument to python
def C_to_Python(c_object):
    try:
        cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]
    except KeyError:
        errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype, severity='fatal')
    cast_func = FunctionDef(name = cast_function,
                            body = [],
                            arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)],
                            results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)])
    return cast_func
[ "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def C_to_Python(c_object):\n if c_object.rank != 0:\n cast_function = 'ndarray_to_pyarray'\n memory_handling = 'stack'\n else:\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n memory_handling = 'alias'\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [FunctionDefArgument(c_object.clone('v', is_argument = True, memory_handling=memory_handling))],\n results = [FunctionDefResult(Variable(dtype=PyccelPyObject(), name = 'o', memory_handling='alias'))])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def fromrpython(func):\n p = annlowlevel.llhelper(PTRTYPE, func)\n return RCTypesFunc.fromllptr(p)", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if 
isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def _Cfunction(name, flags, errcheck, *types):\n print name , 'xxxx', type\n\t\n if hasattr(dll, name):\n p = ctypes.CFUNCTYPE(*types)\n f = p((name, dll), flags)\n if errcheck is not None:\n f.errcheck = errcheck\n # replace the Python function\n # in this module, but only when\n # running as python -O or -OO\n if __debug__:\n _Cfunctions[name] = f\n else:\n _Globals[name] = f\n return f\n raise NameError('no function %r' % (name,))", "def ggml_map_custom1_f32(ctx: ffi.CData, a: ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def ggml_map_custom2_f32(ctx: ffi.CData, a: ffi.CData, b: 
ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def c_function(self, restype, *argtypes):\n def wrap(f):\n def inner(*args, **kwargs):\n # TODO: apply args and kwargs appropriate to Python\n if not f._c_func:\n f._c_func = f._c_func_proto()\n return f._c_func(*args)\n if PY3:\n name = f.__name__\n varnames = f.__code__.co_varnames\n else:\n name = f.func_name\n varnames = f.func_code.co_varnames\n cargs = zip(varnames, argtypes)\n f._c_decl, f._c_code = self._create_func(name, restype, cargs, f.__doc__)\n f._c_func_proto = lambda: self.state.get_symbol(name,\n ctypes.CFUNCTYPE(restype, *argtypes))\n f._c_func = None\n self.parts.append(f)\n return inner\n return wrap", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n 
C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def ggml_map_binary_f32(ctx: ffi.CData, a: ffi.CData, b: ffi.CData, fun: ffi.CData) -> ffi.CData:\n ...", "def python_value(env, value: ffi.CData) -> type:\n return PYTHON_VALUES[value.header.type](env, value)", "def cfunction(self):\n if self._lib is None:\n self._lib = jit_compile_and_load(self.ccode, self.basename,\n self.compiler)\n if self._cfunction is None:\n self._cfunction = getattr(self._lib, self.name)\n self._cfunction.argtypes = self.argtypes\n\n return self._cfunction", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = 
\"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def _Cfunction(name, flags, errcheck, *types):\n if hasattr(dll, name) and name in _Globals:\n p = ctypes.CFUNCTYPE(*types)\n f = p((name, dll), flags)\n if errcheck is not None:\n f.errcheck = errcheck\n # replace the Python function\n # in this module, but only when\n # running as python -O or -OO\n if __debug__:\n _Cfunctions[name] = f\n else:\n _Globals[name] = f\n return f\n raise NameError('no function %r' % (name,))", "def _Cfunction(name, flags, errcheck, *types):\r\n if hasattr(dll, name) and name in _Globals:\r\n p = ctypes.CFUNCTYPE(*types)\r\n f = p((name, dll), flags)\r\n if errcheck is not None:\r\n f.errcheck = errcheck\r\n # replace the Python function\r\n # in this module, but only when\r\n # running as python -O or -OO\r\n if __debug__:\r\n _Cfunctions[name] = f\r\n else:\r\n _Globals[name] = f\r\n return f\r\n raise NameError('no function %r' % (name,))", "def ctypes_type(self):", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n 
has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a function call to the C/Python API function PyErr_SetString
def PyErr_SetString(exception, message): func = FunctionDef(name = 'PyErr_SetString', body = [], arguments = [Variable(dtype = PyccelPyObject(), name = 'o'), Variable(dtype = NativeString(), name = 's')], results = []) exception = Variable(PyccelPyObject(), name = exception) return FunctionCall(func, [exception, message])
[ "def PyErr_Warn(space, w_category, message):\n return PyErr_WarnEx(space, w_category, message, 1)", "def PyErr_SetExcInfo(space, py_type, py_value, py_traceback):\n w_type = get_w_obj_and_decref(space, py_type)\n w_value = get_w_obj_and_decref(space, py_value)\n w_traceback = get_w_obj_and_decref(space, py_traceback)\n if w_value is None or space.is_w(w_value, space.w_None):\n operror = None\n else:\n tb = None\n if w_traceback is not None:\n try:\n tb = pytraceback.check_traceback(space, w_traceback, '?')\n except OperationError: # catch and ignore bogus objects\n pass\n operror = OperationError(w_type, w_value, tb)\n #\n ec = space.getexecutioncontext()\n ec.set_sys_exc_info(operror)", "def PyErr_SetFromErrno(space, w_type):\n PyErr_SetFromErrnoWithFilename(space, w_type,\n lltype.nullptr(rffi.CCHARP.TO))", "def PyErr_PrintEx(space, set_sys_last_vars):\n if not PyErr_Occurred(space):\n PyErr_BadInternalCall(space)\n state = space.fromcache(State)\n operror = state.clear_exception()\n\n w_type = operror.w_type\n w_value = operror.get_w_value(space)\n w_tb = operror.get_w_traceback(space)\n\n if rffi.cast(lltype.Signed, set_sys_last_vars):\n space.sys.setdictvalue(space, \"last_type\", w_type)\n space.sys.setdictvalue(space, \"last_value\", w_value)\n space.sys.setdictvalue(space, \"last_traceback\", w_tb)\n\n space.call_function(space.sys.get(\"excepthook\"),\n w_type, w_value, w_tb)", "def PyErr_SetObject(space, w_type, w_value):\n state = space.fromcache(State)\n state.set_exception(OperationError(w_type, w_value))", "def c_exception(self):\n _c_exception_result = _str_dc(self._swigobj.c_exception())\n return _c_exception_result", "def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value):\n # XXX Doesn't actually do anything with PyErr_CheckSignals.\n errno = rffi.cast(lltype.Signed, rposix._get_errno())\n msg = os.strerror(errno)\n if w_value:\n w_error = space.call_function(w_type,\n space.newint(errno),\n space.newtext(msg),\n w_value)\n else:\n w_error = space.call_function(w_type,\n space.newint(errno),\n space.newtext(msg))\n raise OperationError(w_type, w_error)", "def failure_code(sub):\r\n return '''{\r\n %(failure_var)s = %(id)s;\r\n if (!PyErr_Occurred()) {\r\n PyErr_SetString(PyExc_RuntimeError,\r\n \"Unexpected error in an Op's C code. 
\"\r\n \"No Python exception was set.\");\r\n }\r\n goto __label_%(id)i;}''' % sub", "def PyErr_Fetch(space, ptype, pvalue, ptraceback):\n state = space.fromcache(State)\n operror = state.clear_exception()\n if operror:\n ptype[0] = make_ref(space, operror.w_type)\n pvalue[0] = make_ref(space, operror.get_w_value(space))\n ptraceback[0] = make_ref(space, operror.get_w_traceback(space))\n else:\n ptype[0] = lltype.nullptr(PyObject.TO)\n pvalue[0] = lltype.nullptr(PyObject.TO)\n ptraceback[0] = lltype.nullptr(PyObject.TO)", "def SPYExceptionHandler(*excargs, **exckwargs):\n\n # Depending on the number of input arguments, we're either in Jupyter/iPython\n # or \"regular\" Python - this matters for coloring error messages\n if len(excargs) == 3:\n isipy = False\n etype, evalue, etb = excargs\n else:\n etype, evalue, etb = sys.exc_info()\n try: # careful: if iPython is used to launch a script, ``get_ipython`` is not defined\n ipy = get_ipython()\n isipy = True\n cols = ipy.InteractiveTB.Colors\n cols.filename = cols.filenameEm\n cols.bold = ansiBold\n sys.last_traceback = etb # smartify ``sys``\n except NameError:\n isipy = False\n\n # Pass ``KeyboardInterrupt`` on to regular excepthook so that CTRL + C\n # can still be used to abort program execution (only relevant in \"regular\"\n # Python prompts)\n if issubclass(etype, KeyboardInterrupt) and not isipy:\n sys.__excepthook__(etype, evalue, etb)\n return\n\n # Starty by putting together first line of error message\n emsg = \"{}\\nSyNCoPy encountered an error in{} \\n\\n\".format(cols.topline if isipy else \"\",\n cols.Normal if isipy else \"\")\n\n # If we're dealing with a `SyntaxError`, show it and getta outta here\n if issubclass(etype, SyntaxError):\n\n # Just format exception, don't mess around w/ traceback\n exc_fmt = traceback.format_exception_only(etype, evalue)\n for eline in exc_fmt:\n if \"File\" in eline:\n eline = eline.split(\"File \")[1]\n fname, lineno = eline.split(\", line \")\n emsg += \"{}{}{}\".format(cols.filename if isipy else \"\",\n fname,\n cols.Normal if isipy else \"\")\n emsg += \", line {}{}{}\".format(cols.lineno if isipy else \"\",\n lineno,\n cols.Normal if isipy else \"\")\n elif \"SyntaxError\" in eline:\n smsg = eline.split(\"SyntaxError: \")[1]\n emsg += \"{}{}SyntaxError{}: {}{}{}\".format(cols.excName if isipy else \"\",\n cols.bold if isipy else \"\",\n cols.Normal if isipy else \"\",\n cols.bold if isipy else \"\",\n smsg,\n cols.Normal if isipy else \"\")\n else:\n emsg += \"{}{}{}\".format(cols.line if isipy else \"\",\n eline,\n cols.Normal if isipy else \"\")\n\n # Show generated message and leave (or kick-off debugging in Jupyer/iPython if %pdb is on)\n logger = get_parallel_logger()\n logger.critical(emsg)\n if isipy:\n if ipy.call_pdb:\n ipy.InteractiveTB.debugger()\n return\n\n # Build an ordered(!) 
dictionary that encodes separators for traceback components\n sep = OrderedDict({\"filename\": \", line \",\n \"lineno\": \" in \",\n \"name\": \"\\n\\t\",\n \"line\": \"\\n\"})\n\n # Find \"root\" of traceback tree (and remove outer-most frames)\n keepgoing = True\n while keepgoing:\n frame = traceback.extract_tb(etb)[0]\n etb = etb.tb_next\n if frame.filename.find(\"site-packages\") < 0 or \\\n (frame.filename.find(\"site-packages\") >= 0 and \\\n frame.filename.find(\"syncopy\") >= 0):\n tb_entry = \"\"\n for attr in sep.keys():\n tb_entry += \"{}{}{}{}\".format(getattr(cols, attr) if isipy else \"\",\n getattr(frame, attr),\n cols.Normal if isipy else \"\",\n sep.get(attr))\n emsg += tb_entry\n keepgoing = False\n\n # Format the exception-part of the traceback - the resulting list usually\n # contains only a single string - if we find more just use everything\n exc_fmt = traceback.format_exception_only(etype, evalue)\n if len(exc_fmt) == 1:\n exc_msg = exc_fmt[0]\n idx = exc_msg.rfind(etype.__name__)\n if idx >= 0:\n exc_msg = exc_msg[idx + len(etype.__name__):]\n exc_name = \"{}{}{}{}\".format(cols.excName if isipy else \"\",\n cols.bold if isipy else \"\",\n etype.__name__,\n cols.Normal if isipy else \"\")\n else:\n exc_msg = \"\".join(exc_fmt)\n exc_name = \"\"\n\n # Now go through traceback and put together a list of strings for printing\n if __tbcount__ and etb is not None:\n emsg += \"\\n\" + \"-\"*80 + \"\\nAbbreviated traceback:\\n\\n\"\n tb_count = 0\n tb_list = []\n for frame in traceback.extract_tb(etb):\n if frame.filename.find(\"site-packages\") < 0 or \\\n (frame.filename.find(\"site-packages\") >= 0 and \\\n frame.filename.find(\"syncopy\") >= 0):\n tb_entry = \"\"\n for attr in sep.keys():\n tb_entry += \"{}{}{}{}\".format(\"\", # placeholder for color if wanted\n getattr(frame, attr),\n \"\", # placeholder for color if wanted\n sep.get(attr))\n tb_list.append(tb_entry)\n tb_count += 1\n if tb_count == __tbcount__:\n break\n emsg += \"\".join(tb_list)\n\n # Finally, another info message\n if etb is not None:\n emsg += \"\\nUse `import traceback; import sys; traceback.print_tb(sys.last_traceback)` \" + \\\n \"for full error traceback.\\n\"\n\n # Glue actual Exception name + message to output string\n emsg += \"{}{}{}{}{}\".format(\"\\n\" if isipy else \"\",\n exc_name,\n cols.bold if isipy else \"\",\n exc_msg,\n cols.Normal if isipy else \"\",)\n\n\n # Show generated message and get outta here\n logger = get_parallel_logger()\n logger.critical(emsg)\n\n # Kick-start debugging in case %pdb is enabled in Jupyter/iPython\n if isipy:\n if ipy.call_pdb:\n ipy.InteractiveTB.debugger()", "def PyErr_WarnEx(space, w_category, message_ptr, stacklevel):\n if w_category is None:\n w_category = space.w_None\n w_message = space.newtext(rffi.charp2str(message_ptr))\n w_stacklevel = space.newint(rffi.cast(lltype.Signed, stacklevel))\n\n w_module = PyImport_Import(space, space.newtext(\"warnings\"))\n w_warn = space.getattr(w_module, space.newtext(\"warn\"))\n space.call_function(w_warn, w_message, w_category, w_stacklevel)\n return 0", "def PyErr_BadArgument(space):\n raise oefmt(space.w_TypeError, \"bad argument type for built-in operation\")", "def PyErr_NormalizeException(space, exc_p, val_p, tb_p):\n if exc_p[0]:\n w_etype = from_ref(space, exc_p[0])\n else:\n # There is no exception, so nothing to do\n return\n if val_p[0]:\n w_evalue = from_ref(space, val_p[0])\n else:\n # On CPython, PyErr_SetNone actually sets val to NULL.\n # Sensible code should probably never trigger this 
path on PyPy, but...\n w_evalue = space.w_None\n operr = OperationError(w_etype, w_evalue)\n operr.normalize_exception(space)\n decref(space, exc_p[0])\n decref(space, val_p[0])\n exc_p[0] = make_ref(space, operr.w_type)\n val_p[0] = make_ref(space, operr.get_w_value(space))", "def GetTraceback():\n#-------------------------------------------------------------------------------\n return traceback.format_exc()", "def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')", "def translate(cpp):\n PyType = registry.get(type(cpp), None)\n if PyType is None:\n warnings.warn(\"Could not find appropriate Python type for C++ Exception\")\n PyType = Exception\n return PyType(cpp)", "def getCompilerError():", "def PyErr_ExceptionMatches(space, w_exc):\n w_type = PyErr_Occurred(space)\n return PyErr_GivenExceptionMatches(space, w_type, w_exc)", "def transformErr2Str(self,*args):\n error_code = c_int32(args[0])\n error_str = create_string_buffer(\"\\000\"*1024)\n status = self.__acqiris_QuantroDLL1.transformErr2Str(self.__instrumentID,error_code,error_str) \n return str(error_str)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a TypeError exception from the variable's information (datatype, precision)
def generate_datatype_error(variable): dtype = variable.dtype if isinstance(dtype, NativeBool): precision = '' if isinstance(dtype, NativeComplex): precision = '{} bit '.format(variable.precision * 2 * 8) else: precision = '{} bit '.format(variable.precision * 8) message = '"Argument must be {precision}{dtype}"'.format( precision = precision, dtype = variable.dtype) return PyErr_SetString('PyExc_TypeError', message)
[ "def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})", "def type_error(var, type_var):\n raise TypeError('reliapy (Error 1) - the type of ' + var + ' must be: ' + type_var)", "def test_invalid_expression_type(self, parse_input_mocked_metadata):\n with pytest.raises(TypeError, match=r\"not of declared type int\"):\n parse_input_mocked_metadata(\"int Beta = -0.231e-6+5.21e-2j\")", "def test_create_fail_on_base_value_type():\n with pytest.raises(UnitParseError):\n Unit(\"a\", base_value=\"a\", dimensions=(mass / time))", "def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)", "def test_types(self):\n values.Float.validate(1.)\n values.Float.validate(1)\n\n with self.assertRaises(TypeError):\n values.Float.validate('1.')", "def test_wrong_type(self, rule):\n rule._expected_value_type = float\n with pytest.raises(TypeError) as e:\n _ = rule._get_comparison()\n assert \"42\" in str(e.value)\n assert \"type\" in str(e.value)", "def CheckType(X,module,varname):\r\n \r\n # author: Gary Mamon\r\n \r\n t = type(X)\r\n if t is not float and t is not np.float64 and t is not int and t is not np.ndarray:\r\n raise print('ERROR in ', module, ' ', varname, \r\n ' is of type ', t, \r\n ', but it must be a float or integer')", "def test_exception_raised(self):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\", \"b\", \"c\"], scaler=\"standard\")\n\n with pytest.raises(\n TypeError,\n match=r\"\"\"The following columns are not numeric in X; \\['b', 'c'\\]\"\"\",\n ):\n\n x.check_numeric_columns(df)", "def test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(TypeError):\n _specification_type_to_python_type(\"unsupported_type\")", "def test_create_fail_on_bad_symbol_type():\n with pytest.raises(UnitParseError):\n Unit([1]) # something other than Expr and str", "def test_non_pd_type_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.fit(X=df, y=[1, 2, 3, 4, 5, 6])", "def test_validate_condition_param_type_error_exception():\n condition = \"not a dict\"\n exception_message = 'Param type error. 
Invalid search_condition type, it should be dict.'\n with pytest.raises(ProfilerParamTypeErrorException) as exc_info:\n validate_condition(condition)\n assert exc_info.value.error_code == '50546082'\n assert exc_info.value.message == exception_message", "def test_create_fail_on_bad_dimensions_type():\n with pytest.raises(UnitParseError):\n Unit(\"a\", base_value=1, dimensions=\"(mass)\")", "def test_constructor_wrong_parameter_type(self):\n\n for invalid in (None, 1):\n with self.assertRaises(TypeError):\n group_tr = OCIO.FixedFunctionTransform(invalid)", "def conversionNotPossibleException(valueType: cern.japc.value.ValueType, valueType2: cern.japc.value.ValueType) -> cern.japc.value.ValueConversionException:\n ...", "def test_invalid_argument_type(self):\n t = TruthTable('A or B')\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(float())\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(None)", "def test_that_values_are_validated_against_specified_data_types(self):\n with pytest.raises(ValidationError):\n Person(first_name=\"John\", age=\"Young\")", "def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exercising some Letter methods.
def test_letter_methods(self): # shift l = get_character("G") self.assertEqual(l.x, 0) self.assertEqual(l.y, 0) l.shift(2, 2) self.assertEqual(l.x, 2) self.assertEqual(l.y, 2) # scale adjusts the scale attributes orig_width = l.scale_x orig_height = l.scale_y l.scale(x=0.5, y=2) self.assertEqual(l.scale_x, orig_width / 2) self.assertEqual(l.scale_y, orig_height * 2) # invert changes the degree attr l.rotate(180) self.assertEqual(l.degrees, 180)
[ "def display_letters(word, guesses):\n pass", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def getAlphabet(self):\n return self.alpha", "def letters_to_be_capitalized(all_possible_letters,direction):\n\n Delta_y = direction[0]\n Delta_x = direction[1]\n list_first_letter = all_possible_letters[0]\n\n #first_letter is the coordinate of each possible first letter. There could \n #be multiple first letters.\n for first_letter in list_first_letter:\n #the first coordinate of letters_capitalize is the possible coordinates \n #of the first letters \n letters_capitalize = [first_letter]\n\n #if the length of the letters_capitalize is the length of all_possible_\n #letters, there is no further test to find the next letter because \n #there is only one letter.\n if len(letters_capitalize) == len(all_possible_letters):\n return letters_capitalize \n for possible_coordinates in all_possible_letters[1:]:\n #possible_coodinates represents the coordinates in the list of \n #possible coordinates of each letter. \n for coordinate in possible_coordinates:\n #coordinate represent each coordinate for one letter.\n #coordinate_Y is the Y value of current coordinate. \n #letters_capitalize[-1][0]represents the y value of the last \n #coordinate in letters_capitalize. desired_Y represents the \n #the desired Y value for the direction give.\n #Same mechanism applies to x value. \n coordinate_Y = coordinate[0]\n coordinate_X = coordinate[1]\n desired_Y = letters_capitalize[-1][0]+Delta_y\n desired_X = letters_capitalize[-1][1]+Delta_x\n\n if coordinate_Y == desired_Y\\\n and coordinate_X == desired_X:\n letters_capitalize.append(coordinate)\n #when letters_capitalize append the last letter of the word,\n #it returns the entire list of coordinates. \n if len(letters_capitalize) == len(all_possible_letters):\n return letters_capitalize", "def getLetter(index):\n alphabet = string.ascii_lowercase + \" \"\n return alphabet[index]", "def get_letter_dict():\n\treturn {\n\t\t'A': 0,\n\t\t'B': 0,\n\t\t'C': 0,\n\t\t'D': 0,\n\t\t'E': 0,\n\t\t'F': 0,\n\t\t'G': 0,\n\t\t'H': 0,\n\t\t'I': 0,\n\t\t'J': 0,\n\t\t'K': 0,\n\t\t'L': 0,\n\t\t'M': 0,\n\t\t'N': 0,\n\t\t'O': 0,\n\t\t'P': 0,\n\t\t'Q': 0,\n\t\t'R': 0,\n\t\t'S': 0,\n\t\t'T': 0,\n\t\t'U': 0,\n\t\t'V': 0,\n\t\t'W': 0,\n\t\t'X': 0,\n\t\t'Y': 0,\n\t\t'Z': 0\n\t}", "def _letter_to_name(self, letter: str) -> str:\n if letter == 'B':\n return \"Black\"\n elif letter == 'W':\n return \"White\"", "def just_letters(c):\n ascii_code = ord(c)\n if 65 <= ascii_code <= 90:\n return c\n elif 97 <= ascii_code <= 122:\n return c\n else:\n return ' '", "def is_letter(answer):\n return answer.upper() if answer.isalpha() else None, f\"{answer} must be a single letter.\"", "def index_letter(self, index):\n\t\treturn ALPHABET[index]", "def alphabet_war(fight):", "def _get_letter(self, dni_number: int) -> str:\n return self._LETTERS[dni_number % 23]", "def test_letter_delimiter(self):\n self.non_default_delimiter_template('a')", "def test_get_letter_text():\n mp.add_donor('Sean Hannity', 400)\n test_letter_text = mp.get_letter_text('Sean Hannity')\n correct_letter_text = (\"{:^41}\\n\"\n \"Thank you so much for your generous donation of:\\n\"\n \"{:>21}{:,}\\n\"\n \"We will always remember your money fondly.\").format('Sean Hannity', '$', 400)\n assert test_letter_text == correct_letter_text", "def get_letter(self):\n return self.__letter", "def _letter(self, base=ord('a'), radix=26):\n\n index = self.index\n if index < 0:\n raise TypeError(\"No iteration position\")\n s = \"\"\n 
while 1:\n index, off = divmod(index, radix)\n s = chr(base + off) + s\n if not index:\n return s", "def map_letter(self, letter):\n if type(letter) != str or len(letter) != 1:\n raise ValueError('Input must be a single character.')\n letter = str.upper(letter)\n\n position = EnigmaMachine.letter_to_number(letter) % 26\n mapped_letter = self.reflector_mapping[position]\n return mapped_letter", "def english():\n\n return \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"", "def motivation_letter(mistake_word, true_word):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method to get baseline file.
def get_test_baseline(self, file_name): return os.path.abspath( os.path.join( os.path.abspath(__file__), u'..', u'baselines', file_name))
[ "def get_baseline(file_name):\r\n return os.path.abspath(\r\n os.path.join(\r\n os.path.abspath(__file__),\r\n u'..',\r\n u'baselines',\r\n file_name))", "def _getbaseline(self):\r\n if self._baseline == None:\r\n result = self._session.execute(\"up -show baseline_project \\\"%s\\\" -f \\\"%%displayname\\\" -u\" % self.objectname)\r\n if result.output.strip().endswith('does not have a baseline project.'):\r\n return None\r\n self._baseline = self._session.create(result.output)\r\n _logger.debug('baseline: %s' % self._baseline)\r\n return self._baseline", "def _GetBaseline(self, filename, directory, upstream_only = False):\n\n local_filename = os.path.join(directory, filename)\n local_directory = local_filename[:local_filename.rfind(\"/\")]\n if upstream_only:\n last_index = local_filename.rfind(\".\")\n if last_index > -1:\n local_filename = (local_filename[:last_index] +\n UPSTREAM_IMAGE_FILE_ENDING)\n\n download_file_modifiers = \"\"\n if local_filename.endswith(\".png\"):\n download_file_modifiers = \"b\" # binary file\n\n if not self.dont_download:\n CreateDirectory(local_directory)\n\n local_baseline = None\n url_of_baseline = None\n\n if self.use_local_baselines:\n test_path_key = self._NormalizeBaselineIdentifier(filename)\n dict = self.baseline_dict\n if upstream_only:\n dict = self.webkit_baseline_dict\n if test_path_key in dict:\n local_baseline = dict[test_path_key]\n url_of_baseline = local_baseline\n shutil.copy(local_baseline, local_directory)\n elif self.verbose:\n print \"Baseline %s does not exist in the index.\" % test_path_key\n else:\n index = 0\n possible_files = self._GetPossibleFileList(filename, upstream_only)\n # Download the baselines from the webkit.org site.\n while local_baseline == None and index < len(possible_files):\n local_baseline = self._DownloadFile(possible_files[index],\n local_filename,\n download_file_modifiers,\n True)\n if local_baseline:\n url_of_baseline = possible_files[index]\n index += 1\n\n if not local_baseline:\n if self.verbose:\n print \"Could not find any baseline for %s\" % filename\n else:\n local_baseline = os.path.normpath(local_baseline)\n if local_baseline and self.verbose:\n print \"Found baseline: %s\" % url_of_baseline\n\n return BaselineCandidate(local_baseline, url_of_baseline)", "def baseline(self):\n return self.data[self.data['treatment'] == 'Baseline']", "def read_base_test(base_file):\n with open(base_file) as f:\n contents = f.read()\n return contents", "def get_baseline(token, project_id):\n r = get_project(token, project_id)\n scenarios = r.json()[\"scenarios\"]\n scene = next(item for item in scenarios if item[\"parent\"] is None)\n baseline_id = scene[\"id\"]\n return baseline_id", "def get_baseline(self):\n register = self.__read_register(self.__BASELINE_REG, 2)\n HB = register[0]\n LB = register[1]\n return (HB << 8) | LB", "def load_baseline(self, baseline):\n self.baseline = es.make_baseline(baseline, spacy_model = self.spacy_model, language = self.language)", "def get_next_baseline(self):\r\n\r\n # Optimal path of baselines to be implemented over the finite horizon\r\n optimal_baseline_path = self.get_optimal_baseline_path()\r\n\r\n # Next 'optimal' emissions intensity baseline to implemented for the coming interval\r\n next_baseline = float(optimal_baseline_path[self.model.OMEGA_T.first()])\r\n\r\n return next_baseline", "def get_baseline(npix,nside_subpatch,QU):\n return libcurvedsky.utils.get_baseline(npix,nside_subpatch,QU)", "def _absolute_baseline_path(self, platform_dir):\n return 
self._filesystem.join(self.web_tests_dir(), 'platform',\n platform_dir)", "def fallback_expected_filename(self, test_name, extension):\n baselines = self.expected_baselines(\n test_name, extension, all_baselines=True)\n if len(baselines) < 2:\n actual_test_name = self.lookup_virtual_test_base(test_name)\n if actual_test_name:\n if len(baselines) == 0:\n return self.fallback_expected_filename(\n actual_test_name, extension)\n # In this case, baselines[0] is the current baseline of the\n # virtual test, so the first base test baseline is the fallback\n # baseline of the virtual test.\n return self.expected_filename(\n actual_test_name, extension, return_default=False)\n return None\n\n baseline_dir, baseline_filename = baselines[1]\n if baseline_dir:\n return self._filesystem.join(baseline_dir, baseline_filename)\n return None", "def _read_baseline(self, path):\n base_rmsd = dict()\n fin = open(path,'r')\n for line in fin:\n if line == '\\s' or line == '' or line == '\\n':\n continue\n k, v = line.split()\n base_rmsd[k.strip()] = float(v.strip())\n return base_rmsd", "def get_patch_baseline(self, BaselineId: str) -> Dict:\n pass", "def _get_baseline(self):\n return Point(self.x, self.y)", "def avgBaseline():\n return aBaseline", "def bbl_file(self, base_file):\n bbl_path = os.path.abspath(os.path.splitext(base_file)[0]) + '.bbl'\n return self.open_encode_safe(bbl_path).readlines()", "def _get_filebase(self) -> str:\n basename = os.path.basename(self.wavfile)\n filebase = basename.split('.')[0]\n # self.filebase = filebase\n\n return filebase", "def baseline(self: T) -> T:\n return self.with_prefix(Prefix.BASELINE)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all the cells in the grid. We start increasing x first, i.e. the 0th cell is the first cell, the 1st cell is the one with the next x in the list and y unchanged, and so on. Returns an array of size n_cells × n_dims.
def cells_list(self): xx, yy = np.meshgrid(self.x_spacings, self.y_spacings) return np.vstack([yy.ravel(), xx.ravel()]).transpose()
[ "def living_cells(self):\n cells = [(i,j) for i in range(self.x_dim)\n for j in range(self.y_dim) if self.grid[i][j] == 1]\n return zip(*cells)", "def make_grid():\n return [[make_cell(x, y) for y in range(3)] for x in range(3)]", "def create_cells_from_dims(num_verts_x: int, num_verts_y: int):\n num_cells_x = num_verts_x - 1\n num_cells_y = num_verts_y - 1\n num_cells = num_cells_x*num_cells_y\n cell_array = np.zeros((num_cells, 4), dtype=int)\n cell_num = 0\n\n # I am sure this could be done in a more efficient way.\n for y_cell in range(num_cells_y):\n for x_cell in range(num_cells_x):\n cell_array[cell_num, 0] = x_cell + num_verts_x*y_cell\n cell_array[cell_num, 1] = cell_array[cell_num, 0] + 1\n cell_array[cell_num, 2] = cell_array[cell_num, 0] + num_verts_x + 1\n cell_array[cell_num, 3] = cell_array[cell_num, 0] + num_verts_x\n cell_num += 1\n\n return cell_array", "def get_adjacent_cells(self, cell):\n cells = []\n if cell.x < self.grid_width-1:\n cells.append(self.get_cell(cell.x+1, cell.y))\n if cell.y > 0:\n cells.append(self.get_cell(cell.x, cell.y-1))\n if cell.x > 0:\n cells.append(self.get_cell(cell.x-1, cell.y))\n if cell.y < self.grid_height-1:\n cells.append(self.get_cell(cell.x, cell.y+1))\n return cells", "def cells(self):\n manager = self._get_manager()\n my_cells = manager.cell_from_pin(self)\n # TODO: take subpath (c[1]) into account? construct some kind of proxy?\n if my_cells is None:\n return []\n my_cells = [c[0] for c in my_cells]\n return mycells", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n part = Partition(list(self))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def construct_a_grid():\n grid_number = 100\n x = np.linspace(-2,1,grid_number)\n y = np.linspace(-1.5,1.5,grid_number)\n \n c = []\n \n for i in range(grid_number):\n row = []\n for j in range(grid_number):\n C_element = x[i]+1j*y[j]\n row.append(C_element)\n c.append(row)\n c = np.array(c)\n return c", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def cells(self):\n for y, row in enumerate(self._grid):\n for x, c in enumerate(row):\n yield(Cell(self, x, y, c))", "def __init_visible_cells(self) -> List[List[int]]:\n visible_cells = []\n for _ in range(0, self.labyrinth_width):\n row = []\n for _ in range(0, self.labyrinth_height):\n row.append(0)\n visible_cells.append(row)\n\n return visible_cells", "def traverse_grid(self, start_cell, direction, num_steps):\n elements = []\n\n for step in range(num_steps):\n row = start_cell[0] + step * direction[0]\n col = start_cell[1] + step * direction[1]\n elements.append(self._grid[row][col])\n\n return elements", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_adjcells(self,cell):\n adj_cells = []\n cells_xy = []\n if cell.x > 0:\n 
adj_cells.append(self.cell_array.item((cell.x-1,cell.y)))\n if cell.x < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x+1,cell.y)))\n if cell.y > 0:\n adj_cells.append(self.cell_array.item((cell.x,cell.y-1)))\n if cell.y < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x,cell.y+1)))\n return adj_cells", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def get_grid(self):\n\t\txvec = numpy.linspace(self.x_lower, self.x_upper, self.nx + 1)\n\t\tyvec = numpy.linspace(self.y_lower, self.y_upper, self.ny + 1)\n\t\t\n\t\t(xgrid, ygrid) = numpy.meshgrid(xvec, yvec)\n\t\t\n\t\treturn (xgrid, ygrid, self.data)", "def GLDAS025Cellgrid():\n return GLDAS025Grids(only_land=False)", "def get_grid_cells(self, featmap_size, scale, stride, offset=0):\n cell_size = stride * scale\n h, w = featmap_size\n x_range = (np.arange(w, dtype=np.float32) + offset) * stride\n y_range = (np.arange(h, dtype=np.float32) + offset) * stride\n x, y = np.meshgrid(x_range, y_range)\n y = y.flatten()\n x = x.flatten()\n grid_cells = np.stack(\n [\n x - 0.5 * cell_size, y - 0.5 * cell_size, x + 0.5 * cell_size,\n y + 0.5 * cell_size\n ],\n axis=-1)\n return grid_cells", "def get_neighbour_cells(position: tuple) -> list:\n cells = []\n y_pos = position[0]\n x_pos = position[1]\n cells.append([x_pos + 1, y_pos])\n cells.append([x_pos + 1, y_pos + 1])\n cells.append([x_pos, y_pos + 1])\n cells.append([x_pos - 1, y_pos + 1])\n cells.append([x_pos - 1, y_pos])\n cells.append([x_pos - 1, y_pos - 1])\n cells.append([x_pos, y_pos - 1])\n cells.append([x_pos + 1, y_pos - 1])\n return cells" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper to run Praat's 'To TextGrid (silences)' function.
def detect_silences(sound, sil_threshold, sil_duration): textgrid = call(sound, 'To TextGrid (silences)', 100, 0.0, sil_threshold, sil_duration, 0.1, 'silence', 'speech') return textgrid
[ "def toTextToolMode(self):\n\n self.log.debug(\"Entered toTextToolMode()\")\n\n # Only do something if it is not currently in this mode.\n if self.toolMode != PriceBarChartWidget.ToolMode['TextTool']:\n self.toolMode = PriceBarChartWidget.ToolMode['TextTool']\n self.graphicsView.toTextToolMode()\n\n self.log.debug(\"Exiting toTextToolMode()\")", "def toTextToolMode(self):\n\n self.log.debug(\"Entered toTextToolMode()\")\n\n # Only do something if it is not currently in this mode.\n if self.toolMode != \\\n PriceBarChartGraphicsView.ToolMode['TextTool']:\n\n self.toolMode = \\\n PriceBarChartGraphicsView.ToolMode['TextTool']\n\n self.setCursor(QCursor(Qt.ArrowCursor))\n self.setDragMode(QGraphicsView.NoDrag)\n\n # Clear out internal working variables.\n self.textGraphicsItem = None\n\n scene = self.scene()\n if scene != None:\n scene.clearSelection()\n\n items = scene.items()\n for item in items:\n self.setGraphicsItemFlagsPerCurrToolMode(item)\n \n self.log.debug(\"Exiting toTextToolMode()\")", "def render_ents(self, text, spans, title):\n ...", "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def make_silence_phones_txt(self):\n raise NotImplementedError", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text", "def do(text):\n return freeling_stemming(text)", "def _yield_text_checks(name, Visualizer):\n pass", "def make_feedme_text():", "def cut_item_texts(self, arrays=None):\n if not arrays: arrays = self.masks()\n for a in arrays:\n for item in self.sources(a):\n i = self._meta['columns'][item]\n for tk in self.valid_tks:\n text = self.text(item, True, tk)\n if text: i['text'][tk] = text\n for ed in ['x', 'y']:\n if i['text'].get('{} edits'.format(ed)):\n for tk in self.valid_tks:\n text = self.text(item, True, tk, ed)\n if text: i['text']['{} edits'.format(ed)][tk] = text\n return None", "def text(text = 'abcd', size = 10, justify = 'left', layer = 0, font = \"DEPLOF\"):\n t = Device('text')\n xoffset = 0\n yoffset = 0\n\n face = font\n if face == \"DEPLOF\":\n scaling = size/1000\n\n for line in text.split('\\n'):\n l = Device(name = 'textline')\n for c in line:\n ascii_val = ord(c)\n if c == ' ':\n xoffset += 500*scaling\n elif (33 <= ascii_val <= 126) or (ascii_val == 181):\n for poly in _glyph[ascii_val]:\n xpts = np.array(poly)[:, 0]*scaling\n ypts = np.array(poly)[:, 1]*scaling\n l.add_polygon([xpts + xoffset, ypts + yoffset],\n layer = layer)\n xoffset += (_width[ascii_val] + _indent[ascii_val])*scaling\n else:\n valid_chars = '!\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~µ'\n warnings.warn('[PHIDL] text(): Warning, some characters ignored, no geometry for character \"%s\" with ascii value %s. 
' \\\n 'Valid characters: %s' % (chr(ascii_val), ascii_val, valid_chars))\n t.add_ref(l)\n yoffset -= 1500*scaling\n xoffset = 0\n else:\n from .font import _get_font_by_name, _get_font_by_file, _get_glyph\n\n # Load the font\n # If we've passed a valid file, try to load that, otherwise search system fonts\n font = None\n if (face.endswith(\".otf\") or face.endswith(\".ttf\")) and os.path.exists(face):\n font = _get_font_by_file(face)\n else:\n try:\n font = _get_font_by_name(face)\n except ValueError:\n pass\n if font is None:\n raise ValueError(('[PHIDL] Failed to find font: \"%s\". ' +\n 'Try specifying the exact (full) path to the .ttf or .otf file. ' +\n 'Otherwise, it might be resolved by rebuilding the matplotlib font cache') % (face))\n\n # Render each character\n for line in text.split('\\n'):\n l = Device('textline')\n xoffset = 0\n for letter in line:\n letter_dev = Device(\"letter\")\n letter_template, advance_x = _get_glyph(font, letter)\n for poly in letter_template.polygons:\n letter_dev.add_polygon(poly.polygons, layer=layer)\n ref = l.add_ref(letter_dev)\n ref.move(destination=(xoffset, 0))\n ref.magnification = size\n xoffset += size*advance_x\n\n ref = t.add_ref(l)\n ref.move(destination=(0, yoffset))\n yoffset -= size\n\n justify = justify.lower()\n for l in t.references:\n if justify == 'left': pass\n if justify == 'right': l.xmax = 0\n if justify == 'center': l.move(origin = l.center,\n destination = (0, 0), axis = 'x')\n\n t.flatten()\n return t", "def prepare_text(document):\n text_processing = textacy.preprocess_text(\n nlp(document).text.replace('-',' ').replace('\\n',''),\n fix_unicode=True,\n lowercase=True,\n transliterate=False,\n no_urls=False,\n no_emails=False,\n no_phone_numbers=False,\n no_numbers=True,\n no_currency_symbols=True,\n no_punct=True,\n no_contractions=True,\n no_accents=True\n )\n prepared_text = nlp(text_processing)\n print ('cleaning text...')\n return (prepared_text)", "def CreateTextOutlines(text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance, multiple=False):\n url = \"rhino/geometry/curve/createtextoutlines-string_string_double_int_bool_plane_double_double\"\n if multiple: url += \"?multiple=true\"\n args = [text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance]\n if multiple: args = list(zip(text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance))\n response = Util.ComputeFetch(url, args)\n response = Util.DecodeToCommonObject(response)\n return response", "def FlashyText(win,center,text,timing):\n winner = Text(center,text)\n winner.setFace(\"arial\")\n winner.setFill(\"black\")\n winner.setSize(30)\n for i in range(1,6):\n time.sleep(timing)\n if i % 2 == 0:\n winner.draw(win)\n else:\n winner.undraw()", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def addText(self,**opts):\n\t\ttext_tag = self.svg_dom.createElement(\"text\")\n\t\ttext_tag.setAttribute(\"id\",str(opts.get(\"id\",\"\"))+\"_text\")\n\t\ttext_tag.setAttribute(\"x\",str(opts.get(\"x\",\"0\")))\n\t\ttext_tag.setAttribute(\"y\",str(opts.get(\"y\",\"0\")))\n\t\t# text_tag.setAttribute(\"dominant-baseline\",\"mathematical\")\n\t\ttext_tag.setAttribute(\"style\", self.svg_current_font)\n\t\tself.svg_current_layer.appendChild(text_tag)\n \n\t\t# 
Generator\n\t\tlines = extractRTFString(opts[\"rtftext\"])\n\t\tfont_info = opts.get(\"fontinfo\",None)\n\t\tif font_info is not None:\n\t\t\tfont_height = int(font_info.get(\"Size\"))\n\t\telse:\n\t\t\tfont_height = 12\n\t\ttotal_height = 0.0\n\t\tfor span in lines:\n\t\t\ttotal_height += float(span[\"style\"].get(\"font-size\",\"%.1fpx\"%font_height)[:-2])\n\t\ty_diff = float(opts.get(\"y\", \"12.0\")) + opts.get(\"height\",0)/2 -total_height/2\n\t\tline_id=opts[\"id\"]\n\t\tlinenb = 0\n\t\tfor span in lines:\n\t\t\ty_diff+= float(span[\"style\"].get(\"font-size\",\"%.1fpx\"%font_height)[:-2])\n\t\t\tlinenb+=1\n\t\t\topts[\"id\"]=str(line_id)+\"_line\"+str(linenb)\n\t\t\tself.addLine(text_tag,text = span[\"string\"], style = span[\"style\"],\\\n\t\t\t\t\t\ty_pos=y_diff, line_height =font_height, **opts)\n\t\tself.__bb_box(opts.get(\"x\", \"0\"), opts.get(\"y\", \"0\"), opts.get(\"width\", \"0\"), opts.get(\"height\", \"0\"))", "def args_batch_to_text(args_batch: ArgsBatch) -> Text:\n lines = []\n for args in args_batch:\n lines.append('; '.join(str(a) for a in args))\n return '\\n'.join(lines)", "def breakText(self, text):\n\t\tlines= [];\n\t\tcurrentLength= 1;\n\t\twhile text and currentLength < len(text):\n\t\t\tnewText= text[:currentLength+1];\n\t\t\tnewWidth, newHeight= self.font.size(newText);\n\t\t\tif (newWidth > geom['subtitle'].width):\n\t\t\t\tline, text= self.splitText(text, currentLength);\n\t\t\t\tlines.append(line);\n\t\t\telse:\n\t\t\t\tcurrentLength+= 1;\n\t\tif text: lines.append(text);\n\t\treturn lines;", "def mytext(x,y,s,**kwargs):\n # we take care of this one\n model = kwargs.pop('model', None)\n if model:\n th = text(x,y,model,**kwargs)\n draw()\n x0,y0,w,h = th.get_window_extent().bounds\n gca().texts.remove(th)\n x = x0\n y = y0\n kwargs['transform'] = matplotlib.transforms.IdentityTransform()\n kwargs['horizontalalignment'] = 'left'\n kwargs['verticalalignment'] = 'baseline'\n# print x,y,kwargs\n return text(x,y,s,**kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves chunked speech intervals as a WAV file.
def save_chunks(chunk_sound, out_path, video_id): chunk_start_ms = int(chunk_sound.get_start_time()*1000) chunk_end_ms = int(chunk_sound.get_end_time()*1000) chunk_duration = chunk_end_ms - chunk_start_ms chunk_fn = '{0}_{1}_{2}.wav'.format(video_id, chunk_start_ms, chunk_end_ms) chunk_file_path = path.join(out_path, chunk_fn) chunk_sound.save(chunk_file_path, 'WAV') return {'filename': chunk_fn, 'video_id': video_id, 'start_time': chunk_start_ms, 'end_time': chunk_end_ms, 'duration': chunk_duration}
[ "def save_wav(sounds_arr, wav_file, sample_rate, sound_length):\n nchannels = 1\n sampwidth = 2\n nframes = len(sounds_arr)\n comptype = 'NONE'\n compname = 'not compressed'\n wav_file.setparams((nchannels, sampwidth, sample_rate, nframes, comptype, compname))\n\n for sample in sounds_arr:\n for _ in range(sound_length):\n wav_file.writeframes(struct.pack('h', sample))\n\n wav_file.close()", "def save_speech(self, data, p):\n filename = 'resources/records/output_'+str(int(time.time()))\n #filename = 'resources/record.wav'\n # writes data to WAV file\n data = b''.join(data)\n wf = wave.open(filename, 'wb')\n wf.setnchannels(self.CHANNELS)\n wf.setsampwidth(p.get_sample_size(self.FORMAT))\n wf.setframerate(self.RATE_WAV) \n wf.writeframes(data)\n wf.close()\n return filename", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def save_audio(self, name=DEFAULT_OUT_NAME):\n print(\"Saving...\")\n wf = wave.open(name+'.wav', 'wb')\n wf.setnchannels(DEFAULT_CHANNELS)\n wf.setsampwidth(self.audio.get_sample_size(DEFAULT_FORMAT))\n wf.setframerate(DEFAULT_RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('Saved')", "def write_wavs(self, clean_id, batch_id, wavs, score, lens):\r\n lens = lens * wavs.shape[1]\r\n record = {}\r\n for i, (cleanid, name, pred_wav, length) in enumerate(\r\n zip(clean_id, batch_id, wavs, lens)\r\n ):\r\n path = os.path.join(self.hparams.MetricGAN_folder, name + \".wav\")\r\n data = torch.unsqueeze(pred_wav[: int(length)].cpu(), 0)\r\n torchaudio.save(path, data, self.hparams.Sample_rate)\r\n\r\n # Make record of path and score for historical training\r\n score = float(score[i][0])\r\n clean_path = os.path.join(\r\n self.hparams.train_clean_folder, cleanid + \".wav\"\r\n )\r\n record[name] = {\r\n \"enh_wav\": path,\r\n \"score\": score,\r\n \"clean_wav\": clean_path,\r\n }\r\n\r\n # Update records for historical training\r\n self.historical_set.update(record)", "def save_audio_file(self):\n\n # has not recorded audio\n if not self.is_audio_record:\n print(\"***you did not set the record flag!\")\n return\n\n import soundfile\n\n # save audio\n soundfile.write('{}out_audio.wav'.format(self.mic_params['plot_path']), self.collector.x_all, self.feature_params['fs'], subtype=None, endian=None, format=None, closefd=True)", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def save_frames_to_wav_file(frames: np.ndarray, sample_rate: int, file_path: str):\n wavfile.write(file_path, sample_rate, np.hstack(frames))", "def save(self):\r\n self.__ensure_dir__(self.dir)\r\n wavfile.write(os.path.join(self.dir, self.filename), self.fs, self.data)", "def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def 
write_wav_many(contin_list, directory):\n suffix = \"wav\"\n writer = write_wav\n utils.write_dir(contin_list, directory, suffix, writer)", "def save_audio(\n magnitudes,\n logdir,\n step,\n sampling_rate,\n n_fft=1024,\n mode=\"train\",\n number=0,\n save_format=\"tensorboard\",\n power=1.5,\n gl_iters=50,\n verbose=True,\n max_normalization=False\n):\n # Clip signal max and min\n if np.min(magnitudes) < 0 or np.max(magnitudes) > 255:\n if verbose:\n print(\"WARNING: {} audio was clipped at step {}\".format(mode.capitalize(), step))\n magnitudes = np.clip(magnitudes, a_min=0, a_max=255)\n signal = griffin_lim(magnitudes.T ** power, n_iters=gl_iters, n_fft=n_fft)\n\n if max_normalization:\n signal /= np.max(np.abs(signal))\n\n if save_format == \"np.array\":\n return signal\n elif save_format == \"tensorboard\":\n tag = \"{}_audio\".format(mode)\n iostream = BytesIO()\n write(iostream, sampling_rate, signal)\n summary = tf.Summary.Audio(encoded_audio_string=iostream.getvalue())\n summary = tf.Summary.Value(tag=tag, audio=summary)\n return summary\n elif save_format == \"disk\":\n file_name = '{}/sample_step{}_{}_{}.wav'.format(logdir, step, number, mode)\n if logdir[0] != '/':\n file_name = \"./\" + file_name\n write(file_name, sampling_rate, signal)\n return None\n else:\n print((\n \"WARN: The save format passed to save_audio was not understood. No \"\n \"sound files will be saved for the current step. \"\n \"Received '{}'.\"\n \"Expected one of 'np.array', 'tensorboard', or 'disk'\"\n ).format(save_format))\n return None", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def write_wav(contin, filename):\n print filename\n if contin.domain_samples.dimensionality != U_.sec.dimensionality:\n raise NotImplementedError\n\n else:\n with open(filename, 'wb') as wav_file:\n sp.io.wavfile.write(\n wav_file,\n rate=contin.sample_rate.to(U_.Hz).magnitude,\n data=contin.values.magnitude)", "def save_slice(self, start_time, end_time, fileName):\n wavfile.write(fileName, self.sRate, self.get_slice(start_time, end_time))", "def save(self):\n\n if self.__filename == \"\":\n raise ValueError(\"No filename 
set for this sound.\")\n\n scipy.io.wavfile.write(self.__filename, self.__sample_rate, self.__samples)", "def store_audio_clips(self, audio_filename, out_folder):\n os.makedirs(out_folder, exist_ok=True)\n audio = AudioSegment.from_file(audio_filename)\n for idx, interval in enumerate(self):\n out_path = os.path.join(out_folder, '%s.wav' % idx)\n audio[interval[0]: interval[1]].export(out_path)", "def write_data_to_wav(self, file_name: str, data):\r\n # apply scale and convert to int16\r\n data = np.int16(data/np.max(np.abs(data)) * self.wav_scale)\r\n # write to file\r\n write(file_name, self.audio_sample_rate, data)\r\n print('Sound ', file_name, ' has been saved')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the input ends of all feed pipes and feeds odd numbers, starting from low up to high (both inclusive), in a round-robin fashion. The process ends by feeding -1 to all pipes; -1 is the sentinel value.
def distributor(ls_feed_pipe_open,low,high): def getNumber(low,high): i = low if i%2 == 0: #if i is even, then start from i+1 odd. i += 1 while i<=high: yield i i+=2 #no need to check for even numbers, so skip it here at begining yield -1 #when generator yields -1, it reached high, so terminate next_pipe = 0 number = getNumber(low,high) while True: msg = next(number) if msg == -1: #to check when generator reached high. break else: #feed pipes in a round robin fashion, #so that over time each generatePrime process experiences same load. ls_feed_pipe_open[next_pipe].send(msg) next_pipe += 1 if next_pipe == len(ls_feed_pipe_open): next_pipe = 0 for p in ls_feed_pipe_open: p.send(-1) #-1 is sentinel value for all generatePrime processs return 0
[ "def odd():\n num = 0\n while True:\n yield num * (num & 1)\n num += 1", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2", "def skip(head=0, tail=0):\n def process(pipe):\n buf = []\n for i, line in enumerate(pipe):\n if i < head:\n continue\n if tail > len(buf):\n buf.append(line)\n continue\n buf.append(line)\n yield buf.pop(0)\n return process", "def fission_pipes():\n def _pipes(num):\n return [base.BasePipe(i) for i in range(1, num + 1)]\n yield _pipes\n base.reset()", "def compositeOdds() -> int:\n for odd in count(3, 2):\n if not isPrime(odd):\n yield odd", "def pipe(*args):\r\n if len(args) < 2:\r\n raise ValueError(\"pipe needs at least 2 processes\")\r\n\r\n # Set stdout=PIPE in every subprocess except the last\r\n for i in args[:-1]:\r\n i[\"stdout\"] = subprocess.PIPE\r\n\r\n # Runs all subprocesses connecting stdins and stdouts to create the\r\n # pipeline. Closes stdouts to avoid deadlocks.\r\n popens = [popen_sp(**args[0])]\r\n for i in range(1, len(args)):\r\n args[i][\"stdin\"] = popens[i - 1].stdout\r\n popens.append(popen_sp(**args[i]))\r\n popens[i - 1].stdout.close()\r\n\r\n # Returns the array of subprocesses just created\r\n return popens", "def testNumberPipeTwoLines(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertFalse(pl.inPipeline)\n self.assertEqual(4, pl.stdin)\n repl.runCommandLine('')\n self.assertEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def generatePrime(ls_primes, feed_pipe,return_dict):\n local_primes = []\n while True:\n n = feed_pipe.recv()\n if n == -1: # sentinel given by distributor.\n break\n else:\n is_prime = True\n\n ##check for divisibility\n ## no need to check for 2 since all are odd numbers\n for prime in ls_primes[1:]:\n if n%prime == 0:\n is_prime = False\n break\n\n ##if the number is prime, append to global list\n if is_prime:\n local_primes.append(n)\n if len(local_primes) >0:\n return_dict[os.getpid()] = local_primes\n return return_dict\n return 0", "def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w", "def interleave(p1, p2):\n if p1 == the_empty_stream:\n return p2\n else:\n return cons_stream(stream_car(p1),\n lambda: interleave(p2, stream_cdr(p1)))", "def get_nums():\n num = -1\n while num != 0:\n num = int(input())\n yield num", "def run(self):\n assert len(self.elements) >= 2, \"In order flow, pipe needs 2 or more elements\"\n in_pipe = self.elements[0]\n other_pipes = self.elements[1:-1]\n out_pipe = self.elements[-1]\n\n self.make_assertions(in_pipe, other_pipes, out_pipe)\n\n for data in in_pipe.grasp():\n write = True\n\n for element in other_pipes:\n if isinstance(element, elements.DataPypElement):\n data = element.extend(data)\n elif isinstance(element, elements.FilterPypElement):\n if not element.stay(data):\n write = False\n break\n if write:\n out_pipe.extract(data)", "def pipemeter(cmd1, cmd2):\n\n proc1 = subprocess.Popen(cmd1, bufsize=0, shell=True, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(cmd2, bufsize=0, shell=True, stdin=subprocess.PIPE)\n bytes_piped = 0\n\n while True:\n data = proc1.stdout.read(CHUNKSIZE)\n length = len(data)\n if length == 0:\n break\n\n written = proc2.stdin.write(data)\n if written != length:\n raise RuntimeError(\"Write failed, wanted to write: {}, written={}\".format(length, written))\n\n bytes_piped += length\n\n proc1.stdout.close()\n proc2.stdin.close()\n\n return 
proc1.wait(), proc2.wait(), bytes_piped", "def run(self, data, rewrap=False, prefetch=0):\n if rewrap:\n data = [data]\n\n for pipe in self._pipes:\n pipe.feed(data)\n data = pipe\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def testNumberPipeOneLine(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4 |')\n self.assertAlmostEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def shift(self):\n for pair in self.pipes:\n pair[0].move(self.pipe_speed)\n pair[1].move(self.pipe_speed)", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def pipe_execution(input_list):\n\n # get indices of |\n pipe_indices = []\n idx = -1\n while True:\n try:\n idx = input_list.index(\"|\", idx + 1)\n pipe_indices.append(idx)\n except ValueError:\n break\n\n # execute each command\n start_ind_command = 0\n stream_io = output_stream()\n for end_ind_command in pipe_indices:\n stream_io = execute_command(input_list, start_ind_command, end_ind_command, stream_io)\n start_ind_command = end_ind_command + 1\n print_func_output(execute_command, input_list, start_ind_command, len(input_list), stream_io)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will take numbers sequentially from feed_pipe and verify whether each is prime. Any primes found will be returned as a dict to the main process; the dict contains only one key-value pair, and the value is always a list.
import os  # generatePrime uses os.getpid() to key its result


def generatePrime(ls_primes, feed_pipe, return_dict):
    local_primes = []
    while True:
        n = feed_pipe.recv()
        if n == -1:  # sentinel given by distributor.
            break
        else:
            is_prime = True

            ## check for divisibility
            ## no need to check for 2 since all are odd numbers
            for prime in ls_primes[1:]:
                if n % prime == 0:
                    is_prime = False
                    break

            ## if the number is prime, append it to the local list
            if is_prime:
                local_primes.append(n)
    if len(local_primes) > 0:
        return_dict[os.getpid()] = local_primes
        return return_dict
    return 0
[ "def distributor(ls_feed_pipe_open,low,high):\n def getNumber(low,high):\n i = low\n if i%2 == 0: #if i is even, then start from i+1 odd.\n i += 1\n while i<=high:\n yield i\n i+=2 #no need to check for even numbers, so skip it here at begining\n yield -1 #when generator yields -1, it reached high, so terminate\n\n next_pipe = 0\n number = getNumber(low,high)\n while True:\n msg = next(number)\n if msg == -1: #to check when generator reached high.\n break\n else:\n #feed pipes in a round robin fashion,\n #so that over time each generatePrime process experiences same load.\n ls_feed_pipe_open[next_pipe].send(msg)\n next_pipe += 1\n if next_pipe == len(ls_feed_pipe_open):\n next_pipe = 0\n for p in ls_feed_pipe_open:\n p.send(-1) #-1 is sentinel value for all generatePrime processs\n return 0", "def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key", "def main():\n\n while True:\n input_val = input('Enter the number: ')\n try:\n val = int(input_val)\n break\n except ValueError:\n print('Only Inter value acceptes. Please, Try again.')\n\n if is_prime(val):\n print(f'{val} is prime number.')\n else:\n print(f'{val} is not a prime number.')", "def primesUntil(condition):\n mem = [2]\n while not condition(mem):\n i = mem[len(mem) - 1]\n yield i\n i = i + 1 #saves one loop through mem.\n while any(i % j == 0 for j in mem):\n i = i + 1\n mem = mem + [i]", "def primes():\n yield 2\n\n for n in count(3, 2):\n if is_prime(n):\n yield n", "def primes_by_mod(limit=1000000):\n if (limit is None or 2 < limit):\n yield 2\n\n h=set()\n k = 3\n while limit is None or k < limit:\n if all(map(lambda x: k % x != 0, h)):\n h.add(k)\n yield k\n k += 2", "def primes():\r\n try:\r\n args = request.args\r\n start_num, end_num = validate_request(args)\r\n # cache key\r\n key = f'primes:{start_num}:{end_num}'\r\n rv = cache.get(key)\r\n if rv is None: # not in cache\r\n job = get_primes_list.queue(start_num, end_num)\r\n print(job.get_id())\r\n cache.set(key, job.get_id(), timeout=3600)\r\n return jsonify(job.get_id()), 200\r\n else:\r\n return jsonify(rv), 200\r\n except Exception as e:\r\n raise InvalidUsage(\"Error Processing request {}\".format(e))", "def test_against_poisson_processes(self):\n compare_against_poisson((6.0, ), (66, ))\n compare_against_poisson((19.0, ), (512, ))\n compare_against_poisson((30.0, ), (1024, ))\n \n compare_against_poisson((5.0, 2.3, ), (16, 32, ))\n compare_against_poisson((2.1, 0.9, ), (50, 50, ))\n \n compare_against_poisson((1.1, 3.3, 2.2, ), (12, 12, 12, ))\n compare_against_poisson((7.1, 0.1, 4.2, ), (8, 3, 11, ))", "def simple(val=None,count=None):\n\n assert (val!=None)!=(count!=None)\n\n primes=[2]\n if val:\n # Find all primes up to and including val.\n for c in range(3,val+1,2):\n m=int(sqrt(c))\n for p in primes:\n if p>m:\n primes.append(c)\n break\n if c%p==0:\n break\n elif count:\n # Find the first count primes.\n c=3\n while len(primes)<count:\n m=int(sqrt(c))\n for p in primes:\n if p>m:\n primes.append(c)\n break\n if c%p==0:\n break\n c+=2\n\n return primes", "def find_primes() 
:\r\n start, end = np.random.randint(20, 70), np.random.randint(100, 150)\r\n \r\n lst = [num for num in range(start, end+1) if is_prime(num)]\r\n return (random.choice(lst), random.choice(lst))", "def get_primes(limit):\n print(f'\\nCalculating primes up to {limit}... ', end='')\n start_time = time()\n limitn = limit+1\n primes = dict()\n for i in range(2, limitn):\n primes[i] = True\n\n for i in primes:\n i_multiples = range(2*i, limitn, i)\n for f in i_multiples:\n primes[f] = False\n print(f'Finsished. Took {time() - start_time} seconds.\\n')\n return sorted([i for i in primes if primes[i] is True]) # Or change to set", "def getPrime(self, group=17):\n default_group = 17\n\n primes = {\n 5: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,\n 14: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,\n 15: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,\n 16: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,\n 17:\n 
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,\n 18:\n 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F5
6E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF\n }\n\n if group in primes.keys():\n return primes[group]\n else:\n print(\"Error: No prime with group %i. Using default.\" % group)\n return primes[default_group]", "def _calc_primes(self, until):\n # If only odd numbers will be prime, so if an even number is given,\n # check one extra number\n if until % 2 == 0:\n until += 1\n\n change = False\n if until > self.last_number_checked:\n i = self.last_number_checked + 2\n while i <= until:\n isPrime = True\n root = math.sqrt(i)//1\n for iter in self.primes:\n if iter > root:\n break\n elif i % iter == 0:\n isPrime = False\n if isPrime:\n self.primes.append(i)\n change = True\n i += 2\n\n self.last_number_checked = until\n return change", "def is_prime(val):\n if val == 2:\n return True\n if val % 2 == 0:\n return False # Checking this makes this much more efficient\n for i in xrange(2, int(math.sqrt(val)) + 1):\n if val % i == 0:\n return False\n return True", "def get_primes(count):\r\n i=1\r\n result=[]\r\n while count>0:\r\n r=0\r\n i+=1\r\n for j in range(1,i+1):\r\n if i%j==0:\r\n r+=1\r\n if r==2:\r\n result.append(i)\r\n count-=1\r\n\r\n return result", "def primes():\n\ti = 0\n\twhile i < sys.maxint - 2:\n\t\tyield cachedPrime[i]\n\t\ti += 1", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def main():\n max_value = int(input(\"Display primes up to what value? \"))\n for value in range(2, max_value + 1):\n if is_prime(value): # See if value is prime\n print(value) # Display the prime number ", "def gen_prime():\r\n yield 2\r\n num = 3\r\n while True:\r\n if is_prime(num):\r\n yield num\r\n\r\n num += 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reject unsupported chain parts
def _select_simple_chainparts(chain_parts):
    for cp in chain_parts:
        if reject_substr_res.search(cp['chainPartName']):
            return False
    return True
[ "def eliminate_equivalents(chains):\n cleaned_chains = list(chains)\n\n for i1 in range(len(chains)):\n print '## progress: %d/%d' % (i1, len(chains))\n for i2 in range(i1 + 1, len(chains)):\n if not chains[i1] in cleaned_chains:\n continue\t\n if not chains[i2] in cleaned_chains:\n continue\n\n chain1 = create_chain(index_to_typelist(chains[i1]))\n chain2 = create_chain(index_to_typelist(chains[i2]))\n if equivalent_under_permutation(chain1, chain2):\n cleaned_chains.remove(chains[i2])\n print '## XXXXXXX'\n\n return cleaned_chains", "def breakBadPairs(self, seq):\n pairs = seq.Alphabet.Pairs\n self.expandAll()\n for i in self.traverse():\n if i.IsPaired:\n if not (seq[i.Start], seq[i.End]) in pairs:\n i.unpair()", "def _filter_committees_failing_weak_representation(self, profile: list[set[int]], committees: list[list[int]]) -> list[list[int]]:\n unique_approval_scores = self._compute_unique_approval_scores(profile)\n parties_deserving_representation = {party for party in self.parties if unique_approval_scores[party] >= self.n / self.k}\n possible_committees = [committee for committee in committees if parties_deserving_representation.issubset(set(committee))]\n return possible_committees", "def test_missing_required_subcomponent(self):\n msg = self._create_message(self.rsp_k21)\n msg.pid.pid_2.cx_4 = '&yyy&zzz' # it misses the required cwe_1\n # del msg.pid.pid_3.cx_10.cwe_1\n self.assertRaises(ValidationError, msg.validate, report_file=self.report_file)\n self._test_report_file('ERROR')", "def test_blind_sig_chain_wrong_msg(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n fake_msg = os.urandom(1024)\n\n ca = ECCBlind()\n signer_obj = ca\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(fake_msg, 1))", "def remove_incompatible_operations(pipelines):\n\n def find_duplicates(pipelines):\n for idx in range(len(pipelines)):\n for idx_ in range(idx + 1, len(pipelines)):\n if pipelines[idx] == pipelines[idx_]:\n return idx\n return -1\n\n\n def _remove_illegal_combination(pipelines, combination):\n illegal_pipes = []\n pipelines_ = []\n for idx, pipeline in enumerate(pipelines):\n combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))\n actives = [pipeline[key] != None for key in pipeline if key in combination_]\n\n if sum(actives) > 1:\n illegal_pipes.append(idx) # Store the index of bad combination\n for param in combination_: # Generate substituting legal combinations\n if pipeline[param] != None: # we need to make new pipe\n pipeline_ = pipeline.copy()\n for param_ in combination_: # Set ALL conflicting parameters to None\n pipeline_[param_] = None\n pipeline_[param] = pipeline[param] # Set current parameter back to original value\n pipelines_.append(pipeline_)\n\n new_pipelines = [i for j, i in enumerate(pipelines) if j not in 
illegal_pipes]\n # new_pipelines.extend(pipelines_)\n return new_pipelines, pipelines_\n\n illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],\n ['SMOOTH', 'SAVGOL']]\n\n for combination in illegal_combinations:\n pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)\n\n pipelines.extend(new_pipes)\n pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}\n pipelines = [json.loads(item) for item in pipelines_set]\n\n\n return pipelines", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # Storing the old chain before comparison\n old_chain = self.chain\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'https://{node}/chain', verify=False)\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.chain = new_chain\n\n print(old_chain)\n print(new_chain)\n\n return True\n\n return False", "def test_unrequired_chain_delete(self):\n self.txn.store_delete(\"felix-c\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-c\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([\"felix-c\"]))\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-b\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})", "def extract_mixed_chains(raw_chains):\n chain_isolation_regex = re.compile(r'^\\w+\\s+\\d+\\s+(.*)')\n\n mixed_chains = [\n re.search(chain_isolation_regex,\n raw_chain).group(1).strip() # remove whitespace\n for raw_chain in raw_chains\n ]\n return mixed_chains", "def valid_chain(chain):\n\n for i in range(len(chain) - 1):\n parent_edge = chain[i]\n child_edge = chain[i + 1]\n # verify that the child of the parent edge (second node) matches the parent of the child edge (first node)\n if not parent_edge[1] == child_edge[0]:\n # if this isn't\n return False\n return True", "def test_reject_choice_cycle(orchestra: OrchestraShim):\n # Print unsolved graph\n orchestra(\"graph\", \"-b\", \"component_cyclic_C\")\n\n # Print solved graph\n with pytest.raises(Exception):\n orchestra(\"graph\", \"-b\", \"-s\", \"component_cyclic_C\")\n\n # Install\n with pytest.raises(Exception):\n orchestra(\"install\", \"-b\", \"component_cyclic_A\")", "def resolve_conflicts(self):\r\n\t\tneighbours = self.nodes\r\n\t\tnew_chain = None\r\n\r\n\t\t# We're only looking for chains longer than ours\r\n\t\tmax_length = len(self.chain)\r\n\r\n\t\t# Grab and verify the chains from all the nodes in our network\r\n\t\tfor node in neighbours:\r\n\t\t\tresponse = requests.get(f'http://{node}/chain')\r\n\r\n\t\t\tif response.status_code == 200:\r\n\t\t\t\tlength = response.json()['length']\r\n\t\t\t\tchain = response.json()['chain']\r\n\r\n\t\t\t\t# Check if the length is longer and the chain is valid\r\n\t\t\t\tif length > max_length and 
self.valid_chain(chain):\r\n\t\t\t\t\tmax_length = length\r\n\t\t\t\t\tnew_chain = chain\r\n\r\n\t\t# Replace our chain if we've discovered a new, valid chain, longer then ours\r\n\t\tif new_chain:\r\n\t\t\tself.chain = new_chain\r\n\t\t\treturn True\r\n\r\n\t\treturn False", "def route_rejected(self, prefix, next_hop, as_path):", "def test_fail_missing_signature_fragment_underflow(self):\n # Adjust bundle balance, since we will also remove the change\n # transaction.\n self.bundle[0].value += self.bundle[-1].value\n\n # Remove the last input's second signature fragment, and the change\n # transaction.\n del self.bundle.transactions[-2:]\n for txn in self.bundle:\n txn.last_index -= 2\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 4 has invalid signature (using 2 fragments).',\n ],\n )", "def inspect_chain(chain):\n next_key = chain.pop('BEGIN')\n while True:\n try:\n next_key = chain.pop(next_key)\n if next_key == \"END\":\n break\n except KeyError:\n return \"BAD\"\n\n if len(chain) > 0:\n return \"BAD\"\n return \"GOOD\"", "def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)", "def fixup(self):\n # remove edges that point to invalid children\n for bp in self.bodyparts:\n edges_to_remove = []\n for e in bp.edges:\n if e.child not in self.bodyparts:\n #bp.edges.remove(e)\n edges_to_remove.append(e)\n for e in edges_to_remove:\n bp.edges.remove(e)\n # make sure root exists\n if self.root not in self.bodyparts or len([x for x in self.bodyparts if x.isRoot == 1]) != 1:\n # randomly select the root node\n for b in self.bodyparts:\n b.isRoot = 0\n self.root = random.choice(self.bodyparts)\n self.root.isRoot = 1\n assert len([x for x in self.bodyparts if x.isRoot == 1]) == 1\n # remove input_map entries that are invalid\n for bp in self.bodyparts:\n if bp.input_map:\n # we need to keep a list and erase at the end otherwise we fall into\n # the trap of removing items for a mutable list whilst iterating\n # over it\n for (tneuron, srclist) in bp.input_map.items():\n if tneuron not in bp.network.inputs:\n del bp.input_map[tneuron]\n else:\n for (sbp, sneuron, w) in srclist[:]:\n if sbp not in self.bodyparts or sneuron not in sbp.network:\n srclist.remove((sbp, sneuron, w))\n for bp in self.bodyparts:\n if bp.input_map:\n for (tneuron, srclist) in bp.input_map.items():\n for (sbp, sneuron, w) in srclist:\n assert sbp in self.bodyparts\n\n # check whether input_map entries are still valid\n for bp in self.bodyparts:\n if bp.input_map:\n krm = []\n for k in bp.input_map.keys():\n if k not in self.bodyparts:\n krm.append(k)\n else:\n # key is valid\n toremove = []\n for (sbp, sig, w) in bp.input_map[k]:\n # check sbp is ok and src is a string or output node\n if sbp not in self.bodyparts or (isinstance(sig, node.Node) and sig not in sbp.network.outputs):\n toremove.append((sbp, sig, w))\n for x in toremove:\n bp.input_map[k].remove(x)\n for k in krm:\n del bp.input_map[k]\n\n self.connectInputNodes()\n self.sanityCheck()", "def test_blind_sig_chain_wrong_ca(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n\n ca = ECCBlind()\n fake_ca = ECCBlind()\n signer_obj = fake_ca\n\n output = bytearray()\n\n for level in range(test_levels):\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n if not level:\n # unlisted 
CA, but a syntactically valid pubkey\n output.extend(fake_ca.pubkey())\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(msg, 1))", "def complementary_chain(self, chain):\n # We supose that every complex has two chain childs\n for chain_item in self.chain_dict.values():\n if chain_item != chain:\n return chain_item\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Marshal information from the selected chainParts to create a 'simple_partition' label.
def _make_simple_partition_label(chain_dict):
    cps = chain_dict['chainParts']
    if not _select_simple_chainparts(cps):
        raise NotImplementedError(
            'chain fails substring selection: not "simple": %s' % (
                chain_dict['chainName']))

    label = 'simplepartition(['
    for cp in cps:
        smcstr = str(cp['smc'])
        if smcstr == 'nosmc':
            smcstr = ''
        for i in range(int(cp['multiplicity'])):
            # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),
            #                                   str(cp['etaRange']),
            #                                   smcstr,)
            condition_str = '(%set,%s' % (str(cp['threshold']),
                                          str(cp['etaRange']),)
            if smcstr:
                condition_str += ',%s)' % smcstr  # fill in the smc requirement rather than a literal '%s'
            else:
                condition_str += ')'
            label += condition_str
    label += '])'
    return label
[ "def provide_partition_info(self):\n self.partition_info = True", "def FormatPartition(self, partition):\n\n fstab = self.fstab\n if fstab:\n p = fstab[partition]\n self.script.append('format(\"%s\", \"%s\", %s, \"%s\", \"%s\");' %\n (p.fs_type, common.PARTITION_TYPES[p.fs_type],\n self._GetSlotSuffixDeviceForEntry(p),\n p.length, p.mount_point))", "def x_write_partition(part, opts=\"\"):\n ui.sendsignal(\"&makelive&\", False, part, 'n' not in opts, 'l' in opts)\n # + iso? + partition + format? + larchboot?", "def _get_parts(self, parts: List) -> None:\n if len(parts) != 2:\n return\n\n range_aa_pos = self.get_aa_pos_range(parts)\n if range_aa_pos:\n self.parts[\"start_aa_flank\"] = range_aa_pos[0]\n self.parts[\"end_aa_flank\"] = range_aa_pos[1]\n self.parts[\"start_pos_flank\"] = range_aa_pos[2]\n self.parts[\"end_pos_flank\"] = range_aa_pos[3]\n self.parts[\"used_one_letter\"] = range_aa_pos[4]\n self.parts[\"inserted_sequence\"] = self.get_protein_inserted_sequence(\n parts, self.parts[\"used_one_letter\"]\n )", "def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)", "def print_partition(t, par=[]):\n\n if is_leaf(t):\n if label(t) == True:\n print(' + '.join(par))\n else:\n left, right = branches(t)[0], branches(t)[1]\n print_partition(left, [str(label(t))] + par)\n print_partition(right, par)\n #print(\"total partitions: \", str(count_leaves(t)))", "def _get_parts(self, parts: List) -> None:\n raise NotImplementedError", "def export(self):\n keys = self._get_fields()\n keys.extend(('ref_prefix', 'num_units', 'fplist', 'do_erc', 'aliases', 'pin', 'footprint'))\n attribs = []\n attribs.append('{}={}'.format('name', repr(self.name)))\n attribs.append('dest=TEMPLATE')\n attribs.append('tool=SKIDL')\n for k in keys:\n v = getattr(self, k, None)\n if v:\n attribs.append('{}={}'.format(k, repr(v)))\n if self.pins:\n pin_strs = [p.export() for p in self.pins]\n attribs.append('pins=[{}]'.format(','.join(pin_strs)))\n return 'Part({})'.format(','.join(attribs))", "def setPlatformBootPartition(self):\n return [PartSpec(mountpoint=\"/boot\", size=Size(\"1GiB\"),\n weight=self.weight(mountpoint=\"/boot\"))]", "def setPlatformBootPartition(self):\n return [PartSpec(mountpoint=\"/boot\", size=Size(\"1GiB\"),\n weight=self.weight(mountpoint=\"/boot\"), lv=False)]", "def partid2nids(self, partid, ntype=...):\n ...", "def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def get_partition():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not 
selection.obj_type in ['partition']:\n warning(\"You need to partition the selection first.\")\n return\n res = askItems([['property',[1]]],\n caption='Partition property')\n if res:\n prop = res['property']\n getPartition(selection,prop)\n highlightPartitions(selection)", "def _wrap_partitions(self, partitions):\n return [\n self.partition_type(object_id, length, width, ip)\n for (object_id, length, width, ip) in zip(*[iter(partitions)] * 4)\n ]", "def to_partition(self, partition: str):\n data_loader = deepcopy(self)\n data_loader.partition = partition\n return data_loader", "def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')", "def getLabels(self, parts):\n #Check if this is a single part or list and make it a list\n if type(parts) != list:\n parts = [parts]\n labels = []\n for part in parts:\n labels.append(part.Label)\n return labels", "def choose_partition():\n # Ask the user wether the partitions should be taken from the original partitions, or from the home-made partitions\n file_name = selector([\"The original partition given by the instructor\", \"The homemade partition file\"], [\"ORIGINAL\", \"HOMEMADE\"])\n\n # Open the corresponding file\n if file_name == \"1\" or file_name == \"ORIGINAL\":\n file = open(\"./assets/partitions.txt\", \"r\")\n elif file_name == \"2\" or file_name == \"HOMEMADE\":\n file = open(\"./assets/homemade_partitions.txt\", \"r\")\n\n skip_lines(-1)\n\n # Print all song's names in the partitions\n lines = file.readlines()\n file.close()\n for i in range(0, len(lines), 2):\n print(lines[i][:-1])\n\n # Ask the user to choose for a song\n song_index = choose_number(len(lines) / 2)\n\n # Get the corresponding song's partition and convert notes to Note instances\n partition = lines[song_index * 2 - 1][:-1].replace(' ', '')\n raw_notes = get_notes_from_line(partition)\n parsed_notes = [Note(note) for note in raw_notes]\n return parsed_notes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Marshal information from the selected chainParts to create a vbenf label. Use a Reducer for the elimination of unusable jets.
def _make_vbenf_label(chain_parts):
    # toy label for development: run simple and dijet independently.
    # simple makes Et cuts on two jets. Independently (sharing possible)
    # of jets chosen by simple, the dijet
    # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6

    assert len(chain_parts) == 1
    scenario = chain_parts[0]['hypoScenario']
    assert scenario.startswith('vbenf')
    args = _args_from_scenario(scenario)
    if not args:
        return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))'

    arg_res = [
        re.compile(r'(?P<lo>\d*)(?P<key>fbet)(?P<hi>\d*)'),
        re.compile(r'(?P<lo>\d*)(?P<key>mass)(?P<hi>\d*)'),
        re.compile(r'(?P<lo>\d*)(?P<key>et)(?P<hi>\d*)'),
    ]

    defaults = {
        'et': ('101', 'inf'),
        'mass': ('800', 'inf'),
        'fbet': ('501', 'inf'),
    }

    argvals = {}
    while args:
        assert len(args) == len(arg_res)
        arg = args.pop()
        for r in arg_res:
            m = r.match(arg)
            if m is not None:
                arg_res.remove(r)
                gd = m.groupdict()
                key = gd['key']

                try:
                    lo = float(gd['lo'])
                except ValueError:
                    lo = defaults[key][0]
                argvals[key+'lo'] = lo
                try:
                    hi = float(gd['hi'])
                except ValueError:
                    hi = defaults[key][1]
                argvals[key+'hi'] = hi

    assert len(args) == len(arg_res)
    assert len(args) == 0

    return """
    and
    (
      []
      simple
      (
        [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]
      )
      combgen
      (
        [(10et, 0eta320)]
        dijet
        (
          [(%(masslo).0fdjmass, 26djdphi)]
        )
        simple
        (
          [(10et, 0eta320)(20et, 0eta320)]
        )
      )
    )""" % argvals
[ "def generate_label(protein_chain):\n label_process = [\n generate_seq_mask,\n generate_atom14_positions,\n generate_backbone_affine,\n generate_rigidgroups,\n generate_torsion_angles,\n generate_pseudo_beta,\n # generate_template_mask,\n # generate_hhblits_profile,\n # generate_masked_msa,\n ]\n\n protein_label = compose(label_process)(protein_chain)\n return protein_label", "def __extract_chain_features(self):\n nf_cmd = f\"nextflow {self.NF_EXECUTE} \" \\\n f\"--joblist {self.chain_cl_jobs_combined}\"\n if not self.local_executor:\n # not local executor -> provided config files\n # need abspath for nextflow execution\n nf_cmd += f\" -c {self.nf_chain_extr_config_file}\"\n # get timestamp to name the project and create a dir for that\n # time() returns something like: 1595861493.8344169\n timestamp = str(time.time()).split(\".\")[0]\n nf_project_name = f\"{self.project_name}_chain_feats_at_{timestamp}\"\n nf_project_path = os.path.join(self.nextflow_dir, nf_project_name)\n os.mkdir(nf_project_path) if not os.path.isdir(nf_project_path) else None\n rc = subprocess.call(nf_cmd, shell=True, cwd=nf_project_path)\n if rc != 0:\n self.die(f\"Error! Process {nf_cmd} died\")\n if not self.keep_nf_logs:\n # remove nextflow intermediate files\n shutil.rmtree(nf_project_path)", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def vizualize_stretch_and_squash(self, chain, where=None):\n\n # if where is not None:\n # parent = where\n # else:\n # parent = chain[0].parent()\n #\n # viz_subnet = parent.createNode(\"subnet\")\n # viz_subnet.setName(\"subnet_viz_chain\")\n # viz_subnet.parm(\"tdisplay\").set(1)\n # viz_subnet.parm(\"display\").set(0)\n # vizer_list = []\n # for i in chain:\n # fetcher = viz_subnet.createNode(\"fetch\", \"fetch_\"+i.name())\n # fetcher.setColor(hou.Color((1.0, 0.4, 0.0)))\n # fetcher.parm(\"useinputoffetched\").set(1)\n # fetcher.parm(\"fetchsubnet\").set(1)\n # fetcher.parm(\"fetchobjpath\").set(fetcher.relativePathTo(i))\n # fetcher.setDisplayFlag(0)\n #\n # vizer = viz_subnet.createNode(\"geo\", \"visualize_\"+i.name())\n # vizer.node(\"file1\").destroy()\n # vizer.createNode(\"sphere\")\n #\n # vizer.parm(\"scale\").set(i.parm(\"crscalex\") )\n # vizer.parm(\"sx\").set(0.025)\n # vizer.parm(\"sy\").set(0.025)\n # vizer.parm(\"sz\").set(0.025)\n #\n # vizer.setInput(0, fetcher)\n # vizer_list.append(vizer)\n # viz_subnet.layoutChildren()\n #\n # return vizer_list", "def DecodeStage():\n\n io = Io({\n 'if_id': Input(if_bundle),\n 'inst': Input(Bits(32)),\n 'stall': Input(Bits(1)),\n 'reg_write': Input(reg_write_bundle),\n 'ras_ctrl': Output(ras_ctrl_bundle),\n 'id_ex': Output(id_ex_bundle),\n 'rs1_data': Output(Bits(C['core-width'])),\n 'rs2_data': Output(Bits(C['core-width'])),\n })\n\n inst = Wire(Bits(32))\n\n with io.if_id.valid:\n inst <<= io.inst\n with otherwise:\n inst <<= 0\n\n regfile = Instance(RegisterFile())\n\n 
itype = Wire(Bits(ITypes.bitwidth))\n\n regfile.r0_addr <<= Rs1(inst)\n regfile.r0_en <<= ~io.stall\n regfile.r1_addr <<= Rs2(inst)\n regfile.r1_en <<= ~io.stall\n\n regfile.w0_addr <<= io.reg_write.w_addr\n regfile.w0_en <<= io.reg_write.w_en & ~io.stall\n regfile.w0_data <<= io.reg_write.w_data\n\n #\n # inst_data is metadata about the current instruction that is passed through\n # the pipeline unrelated to control signals. It's primary use is for hazard\n # detection and data forwarding.\n #\n\n io.id_ex.ctrl.valid <<= io.if_id.valid\n io.id_ex.ctrl.inst <<= inst\n io.id_ex.ctrl.pc <<= io.if_id.pc\n\n #\n # Hook up the register read outputs.\n #\n\n io.rs1_data <<= regfile.r0_data\n io.rs2_data <<= regfile.r1_data\n\n #\n # Control is a Python function that produces the primary decode logic. It\n # matches against a set of known instructions to produce control signals for\n # later stages in the pipeline. The known instructions are encoded in the\n # 'instructions' variable above.\n #\n\n Control(inst, itype, io.id_ex.ctrl)\n\n #\n # TODO: Documentation\n #\n\n HandleRasCtrl(io.ras_ctrl, inst, io.if_id.pc)\n\n #\n # GenerateImmediate produces logic that consume the itype (instruction\n # type, which is R, I, S, B, U, or J) and produces the immediate value for\n # this instruction.\n #\n\n io.id_ex.imm <<= GenerateImmediate(inst, itype)\n\n NameSignals(locals())", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HCompositeState2ProcDef, self).__init__(name='HCompositeState2ProcDef', num_nodes=0, edges=[])\n \n \n # Set the graph attributes\n self[\"mm__\"] = ['HimesisMM']\n self[\"name\"] = \"\"\"CompositeState2ProcDef\"\"\"\n self[\"GUID__\"] = uuid.uuid3(uuid.NAMESPACE_DNS,'CompositeState2ProcDef')\n \n # match model. 
We only support one match model\n self.add_node()\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\n \n # apply model node\n self.add_node()\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n \n # paired with relation between match and apply models\n \n self.add_node()\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[2][\"attr1\"] = \"\"\"CompositeState2ProcDef\"\"\"\n \n # match class State() node\n self.add_node()\n \n self.vs[3][\"mm__\"] = \"\"\"State\"\"\" \n self.vs[3][\"attr1\"] = \"\"\"+\"\"\" \n \n \n # apply class ProcDef() node\n self.add_node()\n\n self.vs[4][\"mm__\"] = \"\"\"ProcDef\"\"\" \n self.vs[4][\"attr1\"] = \"\"\"1\"\"\"\n # apply class LocalDef() node\n self.add_node()\n\n self.vs[5][\"mm__\"] = \"\"\"LocalDef\"\"\" \n self.vs[5][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[6][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[6][\"attr1\"] = \"\"\"1\"\"\"\n # apply class New() node\n self.add_node()\n\n self.vs[7][\"mm__\"] = \"\"\"New\"\"\" \n self.vs[7][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[8][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[8][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[9][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[9][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[10][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[10][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Par() node\n self.add_node()\n\n self.vs[11][\"mm__\"] = \"\"\"Par\"\"\" \n self.vs[11][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Inst() node\n self.add_node()\n\n self.vs[12][\"mm__\"] = \"\"\"Inst\"\"\" \n self.vs[12][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Inst() node\n self.add_node()\n\n self.vs[13][\"mm__\"] = \"\"\"Inst\"\"\" \n self.vs[13][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[14][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[14][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[15][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[15][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[16][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[16][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[17][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[17][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[18][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[18][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[19][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[19][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[20][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[20][\"attr1\"] = \"\"\"1\"\"\"\n \n \n \n # apply association ProcDef--p-->LocalDef node\n self.add_node()\n self.vs[21][\"attr1\"] = \"\"\"p\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ProcDef--channelNames-->Name node\n self.add_node()\n self.vs[22][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association LocalDef--p-->New node\n self.add_node()\n self.vs[23][\"attr1\"] = \"\"\"p\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association New--channelNames-->Name node\n self.add_node()\n self.vs[24][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[24][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association New--channelNames-->Name node\n self.add_node()\n self.vs[25][\"attr1\"] = 
\"\"\"channelNames\"\"\"\n self.vs[25][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association New--channelNames-->Name node\n self.add_node()\n self.vs[26][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[26][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association New--p-->Par node\n self.add_node()\n self.vs[27][\"attr1\"] = \"\"\"p\"\"\"\n self.vs[27][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Par--p-->Inst node\n self.add_node()\n self.vs[28][\"attr1\"] = \"\"\"p\"\"\"\n self.vs[28][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Par--p-->Inst node\n self.add_node()\n self.vs[29][\"attr1\"] = \"\"\"p\"\"\"\n self.vs[29][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[30][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[30][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[31][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[31][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[32][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[32][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[33][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[33][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[34][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[34][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[35][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[35][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[36][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[36][\"mm__\"] = \"\"\"directLink_T\"\"\"\n \n # backward association State---->ProcDef node\n self.add_node()\n\n self.vs[37][\"mm__\"] = \"\"\"backward_link\"\"\"\n \n \n \n \n # Add the edges\n self.add_edges([\n (0,3), # matchmodel -> match_class State()\n (1,4), # applymodel -> -> apply_class ProcDef()\n (1,5), # applymodel -> -> apply_class LocalDef()\n (1,6), # applymodel -> -> apply_class Name()\n (1,7), # applymodel -> -> apply_class New()\n (1,8), # applymodel -> -> apply_class Name()\n (1,9), # applymodel -> -> apply_class Name()\n (1,10), # applymodel -> -> apply_class Name()\n (1,11), # applymodel -> -> apply_class Par()\n (1,12), # applymodel -> -> apply_class Inst()\n (1,13), # applymodel -> -> apply_class Inst()\n (1,14), # applymodel -> -> apply_class Name()\n (1,15), # applymodel -> -> apply_class Name()\n (1,16), # applymodel -> -> apply_class Name()\n (1,17), # applymodel -> -> apply_class Name()\n (1,18), # applymodel -> -> apply_class Name()\n (1,19), # applymodel -> -> apply_class Name()\n (1,20), # applymodel -> -> apply_class Name()\n (4,21), # apply_class ProcDef() -> association p\n (21,5), # association p -> apply_class LocalDef()\n (4,22), # apply_class ProcDef() -> association channelNames\n (22,6), # association channelNames -> apply_class Name()\n (5,23), # apply_class LocalDef() -> association p\n (23,7), # association p -> apply_class New()\n (7,24), # apply_class New() -> association channelNames\n (24,8), # association channelNames -> apply_class Name()\n (7,25), # apply_class New() -> association channelNames\n (25,9), # association channelNames -> apply_class Name()\n (7,26), # apply_class New() -> association 
channelNames\n (26,10), # association channelNames -> apply_class Name()\n (7,27), # apply_class New() -> association p\n (27,11), # association p -> apply_class Par()\n (11,28), # apply_class Par() -> association p\n (28,13), # association p -> apply_class Inst()\n (11,29), # apply_class Par() -> association p\n (29,12), # association p -> apply_class Inst()\n (12,30), # apply_class Inst() -> association channelNames\n (30,14), # association channelNames -> apply_class Name()\n (12,31), # apply_class Inst() -> association channelNames\n (31,15), # association channelNames -> apply_class Name()\n (12,32), # apply_class Inst() -> association channelNames\n (32,16), # association channelNames -> apply_class Name()\n (12,33), # apply_class Inst() -> association channelNames\n (33,17), # association channelNames -> apply_class Name()\n (13,34), # apply_class Inst() -> association channelNames\n (34,18), # association channelNames -> apply_class Name()\n (13,35), # apply_class Inst() -> association channelNames\n (35,19), # association channelNames -> apply_class Name()\n (13,36), # apply_class Inst() -> association channelNames\n (36,20), # association channelNames -> apply_class Name()\n (4,37), # apply_class ProcDef() -> backward_association\n (37,3), # backward_association -> apply_class State()\n (0,2), # matchmodel -> pairedwith\n (2,1) # pairedwith -> applyModel\t\t\t\t\n\t\t])\n\n # Add the attribute equations\n self[\"equations\"] = [((3,'isComposite'),('constant','true')), ((6,'literal'),('constant','sh')), ((8,'literal'),('constant','exit_in')), ((9,'literal'),('constant','exack_in')), ((10,'literal'),('constant','sh_in')), ((12,'name'),('constant','C')), ((13,'name'),('constant','H')), ((14,'literal'),('constant','enp')), ((15,'literal'),('constant','exit_in')), ((16,'literal'),('constant','exack_in')), ((17,'literal'),('constant','sh_in')), ((18,'literal'),('constant','exit_in')), ((19,'literal'),('constant','exack_in')), ((20,'literal'),('constant','sh_in')), ]", "def vtkoutput1part(jobname, nodes, elem_lists, f, a, RF, NDIM, NDOF_NODE):\n \n # set output file name\n outputfile = jobname+'-output.vtk'\n \n # set the no. of decimals for real outputs\n nd = param.num_decimal_for_output\n \n with open(outputfile,'w') as output:\n \n # write header\n output.write('# vtk DataFile Version 3.1\\n')\n output.write('for FEM Programme output\\n')\n output.write('ASCII\\n')\n output.write('DATASET UNSTRUCTURED_GRID\\n')\n \n # write nodes\n output.write('POINTS '+str(len(nodes))+' double\\n')\n for node in nodes:\n for x in node:\n output.write(' '+str(round(x,nd)))\n if (len(node) == 2): output.write(' 0.0')\n output.write('\\n')\n \n # write element connec\n # calculate total no. 
of elems and total size of data and form the list\n # of element number for vtk output\n nelem = 0\n nsize = 0\n elnumlist = []\n for elist in elem_lists:\n nelem += len(elist.elems)\n if elist.eltype in param.tuple_truss2d2_eltypes+param.tuple_frame2d2_eltypes:\n # frame2d2 elem has 3 data: elem index and 2 node indices\n nsize += 3*len(elist.elems)\n # frame2d2 elem has eltype number 3 in vtk format (see below)\n for i in range(len(elist.elems)): elnumlist.append(3)\n elif elist.eltype in param.tuple_tri2d3_eltypes:\n # tri elem has 4 data: elem index and node indices\n nsize += 4*len(elist.elems)\n # tri elem has eltype number 5 in vtk format (see below)\n for i in range(len(elist.elems)): elnumlist.append(5)\n else:\n print('Unsupported element type in vtkoutput: '+elist.eltype)\n \n # write line for total count of elems and integer numbers to be written\n output.write('CELLS '+str(nelem)+' '+str(nsize)+'\\n')\n \n # write each element data: total no. of nodes and its nodal connec for\n # each line\n for elist in elem_lists:\n for elem in elist.elems:\n output.write(str(len(elem.cnc_node))) # total no. of nodes\n for inode in elem.cnc_node: # nodal connectivity of this elem\n output.write(' '+str(inode))\n output.write('\\n')\n \n # write the vtk eltype numbers for the different element types:\n output.write('CELL_TYPES'+' '+str(nelem)+'\\n')\n # ---- see the below table for reference -------------\n #1\tVTK_VERTEX\tVertex\n #2\tVTK_POLY_VERTEX\tVertex\n #3\tVTK_LINE\tEdge Lagrange P1\n #5\tVTK_TRIANGLE\tTriangle Lagrange P1\n #8\tVTK_PIXEL\tQuadrilateral Lagrange P1\n #9\tVTK_QUAD\tQuadrilateral Lagrange P1\n #10\tVTK_TETRA\tTetrahedron Lagrange P1\n #11\tVTK_VOXEL\tHexahedron Lagrange P1\n #12\tVTK_HEXAHEDRON\tHexahedron Lagrange P1\n #13\tVTK_WEDGE\tWedge Lagrange P1\n #21\tVTK_QUADRATIC_EDGE\tEdge Lagrange P2\n #22\tVTK_QUADRATIC_TRIANGLE\tTriangle Lagrange P2\n #23\tVTK_QUADRATIC_QUAD\tQuadrilateral Lagrange P2\n #24\tVTK_QUADRATIC_TETRA\tTetrahedron Lagrange P2\n #25\tVTK_QUADRATIC_HEXAHEDRON\tHexahedron Lagrange P2\n # ----------------------------------------------------\n for elnum in elnumlist:\n output.write(str(elnum)+'\\n')\n \n # write nodal displacement vectors\n nnode = len(nodes)\n output.write('POINT_DATA '+str(nnode)+'\\n')\n output.write('VECTORS displacement double\\n')\n for i in range(nnode):\n for j in range(NDIM):\n output.write(str(round(a[i*NDOF_NODE+j,0],nd))+' ')\n if (NDIM == 2): output.write('0.0')\n output.write('\\n')\n \n # write field data:\n # - applied forces/moments f\n # - reaction forces/moments RF\n # - additional nodal DoFs if present\n \n # determine the number of field data\n if NDOF_NODE > NDIM:\n nfield = 3\n else:\n nfield = 2\n output.write('FIELD FieldData '+str(nfield)+'\\n') \n # write applied forces and moments\n output.write('AppliedForcesMoments '+str(NDOF_NODE)+' '+str(nnode)+' double\\n')\n for i in range(nnode):\n for j in range(NDOF_NODE):\n output.write(str(round(f[i*NDOF_NODE+j,0],nd))+' ')\n output.write('\\n')\n # write reaction forces and moments\n output.write('ReactionForcesMoments '+str(NDOF_NODE)+' '+str(nnode)+' double\\n')\n for i in range(nnode):\n for j in range(NDOF_NODE):\n output.write(str(round(RF[i*NDOF_NODE+j,0],nd))+' ')\n output.write('\\n')\n # write additional DoFs\n if (NDOF_NODE > NDIM):\n output.write('AdditionalDoFs '+str(NDOF_NODE-NDIM)+' '+str(nnode)+' double\\n')\n for i in range(nnode):\n for j in range(NDIM, NDOF_NODE):\n output.write(str(round(a[i*NDOF_NODE+j,0],nd))+' ')\n output.write('\\n')\n 
\n output.close()", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HState2ProcDef, self).__init__(name='HState2ProcDef', num_nodes=0, edges=[])\n \n \n # Set the graph attributes\n self[\"mm__\"] = ['HimesisMM']\n self[\"name\"] = \"\"\"State2ProcDef\"\"\"\n self[\"GUID__\"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State2ProcDef')\n \n # match model. We only support one match model\n self.add_node()\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\n \n # apply model node\n self.add_node()\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n \n # paired with relation between match and apply models\n \n self.add_node()\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[2][\"attr1\"] = \"\"\"State2ProcDef\"\"\"\n \n # match class State() node\n self.add_node()\n \n self.vs[3][\"mm__\"] = \"\"\"State\"\"\" \n self.vs[3][\"attr1\"] = \"\"\"+\"\"\" \n \n \n # apply class ProcDef() node\n self.add_node()\n\n self.vs[4][\"mm__\"] = \"\"\"ProcDef\"\"\" \n self.vs[4][\"attr1\"] = \"\"\"1\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[5][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[5][\"attr1\"] = \"\"\"1\"\"\"\n \n \n \n # apply association ProcDef--channelNames-->Name node\n self.add_node()\n self.vs[6][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"directLink_T\"\"\"\n \n \n \n \n \n # Add the edges\n self.add_edges([\n (0,3), # matchmodel -> match_class State()\n (1,4), # applymodel -> -> apply_class ProcDef()\n (1,5), # applymodel -> -> apply_class Name()\n (4,6), # apply_class ProcDef() -> association channelNames\n (6,5), # association channelNames -> apply_class Name()\n (0,2), # matchmodel -> pairedwith\n (2,1) # pairedwith -> applyModel\t\t\t\t\n\t\t])\n\n # Add the attribute equations\n self[\"equations\"] = [((4,'name'),('concat',(('constant','S'),(3,'name')))), ((5,'literal'),('constant','exack')), ]", "def generate_code(self, parts: list):\n for i in range(len(parts)):\n\n if not self._involves_this_party(parts[i][0]):\n # not our data, skip job\n continue\n\n if parts[i][1] == \"python\":\n cg = PythonCodeGen(\n self.config,\n parts[i][0],\n f\"{self.config.system_configs['CODEGEN'].workflow_name}-python-job-{i}\"\n )\n cg.generate()\n elif parts[i][1] == \"jiff\":\n cg = JiffCodeGen(\n self.config,\n parts[i][0],\n f\"{self.config.system_configs['CODEGEN'].workflow_name}-jiff-job-{i}\"\n )\n cg.generate()\n else:\n raise Exception(f\"Unrecognized backend from partition: {parts[i][1]}.\")", "def dumpgenpart(part,genparts=None,event=None,flags=[]):\n info = \">>> i=%2s, PID=%3s, status=%2s, mother=%2s\"%(part._index,part.pdgId,part.status,part.genPartIdxMother)\n if part.genPartIdxMother>=0:\n if genparts: \n moth = genparts[part.genPartIdxMother].pdgId\n info += \" (%s)\"%(moth)\n elif event:\n moth = event.GenPart_pdgId[part.genPartIdxMother]\n info += \" (%s)\"%(moth)\n for bit in flags:\n info += \", bit%s=%d\"%(bit,hasbit(part.statusFlags,bit))\n print info", "def build_stage2_6(self):\n paf, cfm = self.stage2_6.values()\n for i in range(2, 7):\n paf_ = OrderedDict([(k.replace('i', str(i)),paf[k]) for k in paf])\n cfm_ = OrderedDict([(k.replace('i', str(i)),cfm[k]) for k in cfm])\n 
stage_ = OrderedDict(PAF=paf_, CFM=cfm_)\n setattr(self, f'stage{i}', stage_)", "def slice_graph_bwd( endea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\t\r\n\tstartnode = slice_node( 0, endea, reg )\t\t# start at the end of the slice node\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\t[tgt_reg, split] = currslice.get_target_reg_bwd()\r\n\t\tprint tgt_reg\r\n\t\tprint split\r\n\t\tif tgt_reg == \"END\":\r\n\t\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.startea != currslice.get_lines()[0][0]):\r\n\t\t\t# Do process this node further, nothing really going on \r\n\t\t\tprint \"ZEZ\"\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name() )\r\n\t\telse:\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tif split:\r\n\t\t\t\tfor ref in xrefs:\r\n\t\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\treturn [ graph, data_bib ]", "def _encode_feature_inputs(self, state):", "def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = 
self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the application settings or did not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()", "def __init__(self, part):\n config = Configuration.config # collect configuration\n self.nodes = [bsp_node.BSPNode(part)] # create root node and the list of nodes\n # calculate initial nparts objective\n nparts = 1 # nparts = sum([l.n_parts for l in self.leaves]) / nparts_original --> measures part reduction\n # calculate initial utilization objective --> measures how much of the parts fill their oriented bounding boxes\n V = np.prod(config.printer_extents)\n if config.obb_utilization:\n utilization = 1 - self.nodes[0].obb.volume / (self.nodes[0].n_parts * V)\n else:\n utilization = 1 - self.nodes[0].part.volume / (self.nodes[0].n_parts * V)\n\n # create objectives dictionary\n self.objectives = {\n 'nparts': nparts,\n 'utilization': utilization,\n 'connector': 0, # no connectors yet\n 'fragility': 0,\n 'seam': 0,\n 'symmetry': 0\n }", "def identity_block(X, f, filters, stage, block):\n \n # Defines name basis.\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieves Filters.\n F1, F2, F3 = filters\n \n # Saves the input value. This is needed later to add back to the main path. 
\n X_shortcut = X\n \n ##### MAIN PATH #####\n # First component of main path.\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n \n # Second component of main path.\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n \n # Third component of main path.\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n\n # Final step: Adds shortcut value to main path, and pass it through a RELU activation.\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. 
Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
dijet label. supports dijet cuts, and cuts on participating jets
def _make_dijet_label(chain_parts): assert len(chain_parts) == 1 scenario = chain_parts[0]['hypoScenario'] assert scenario.startswith('dijet') arg_res = [ re.compile(r'^(?P<lo>\d*)(?P<key>djmass)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j1et)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j1eta)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j2et)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j2eta)(?P<hi>\d*)$'), ] defaults = { 'j1et': ('100', 'inf'), 'j2et': ('100', 'inf'), 'j1eta': ('0', '320'), 'j2eta': ('0', '320'), 'djmass': ('1000', 'inf'), } args = _args_from_scenario(scenario) argvals = {} while args: assert len(args) == len(arg_res) arg = args.pop() for r in arg_res: m = r.match(arg) if m is not None: arg_res.remove(r) gd = m.groupdict() key = gd['key'] try: lo = float(gd['lo']) except ValueError: lo = defaults[key][0] argvals[key+'lo'] = lo try: hi = float(gd['hi']) except ValueError: hi = defaults[key][1] argvals[key+'hi'] = hi assert len(args) == len(arg_res) assert len(args) == 0 return """ combgen( [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f) (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f) ] dijet( [(%(djmasslo).0fdjmass)]) simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f) (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)]) )""" % argvals
[ "def CellLabeling(self,channel,singnal,file, features,feature_channel):\n\n if self.cell_channel != None :\n\n name=\"segmentation_map\"\n\n if not os.path.exists(\"output/\"+str(file)+\"/\"+name):\n os.makedirs(\"output/\"+str(file)+\"/\"+name)\n\n\n \n for idx in range (len(self.channel_content[channel])):\n \n frame= self.channel_content[channel][idx]\n pl_.figure(figsize=(30,30))\n pl_.imshow(frame.T, cmap=\"gray\")\n \n if track_channel == \"nucl\" :\n \n for nucl in range(0,len(features[\"nucl\"][\"uid\"])):\n \n if len(features[\"nucl\"][\"uid\"][nucl])>idx :\n \n pl_.text(features[\"nucl\"][\"position\"][nucl][idx][0],features[\"nucl\"][\"position\"][nucl][idx][1],\n str(features[\"nucl\"][\"uid\"][nucl][0]+1),color=\"red\", fontsize=50)\n \n \n \n elif track_channel == \"cell\" :\n \n for cell in range(0,len(features[\"cell\"][\"uid\"])):\n \n if len(features[\"cell\"][\"uid\"][cell])> idx: #0 :\n \n pl_.text(features[\"cell\"][\"position\"][cell][idx][0],features[\"cell\"][\"position\"][cell][idx][1],\n str(features[\"cell\"][\"uid\"][cell][0]+1),color=\"red\", fontsize=50)\n \n\n\n # save figures\n \n path= \"output/\"+str(file)+\"/\"+name+\"/frame\"+str(idx)+\".jpg\"\n pl_.savefig(path)\n pl_.close()\n\n\n self.channel_content[channel]=None\n self.channel_content[\"CHERRY\"]=None", "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def get_crop_label(self):\r\n\t\tdata = DBF('configuration/cdl.dbf')\r\n\t\tdataset = []\r\n\t\tfor i in data:\r\n\t\t\tif i['CLASS_NAME'] != '':\r\n\t\t\t\tdataset.append(i)\r\n\t\tpattern = re.compile(r'\\bWater\\b|Undefined|Developed|Clouds|Background|Aquaculture')\r\n\t\tnon_empty = set()\r\n\t\tnon_veg = set()\r\n\t\tforest = set()\r\n\t\tcorns = set()\r\n\t\tsoybeans = set()\r\n\t\tfor i, obj in enumerate(dataset):\r\n\t\t\tif obj['CLASS_NAME'] != '':\r\n\t\t\t\tnon_empty.add(obj['VALUE'])\r\n\r\n\t\t\tif re.findall(pattern, obj['CLASS_NAME']) != []:\r\n\t\t\t\tnon_veg.add(obj['VALUE'])\r\n\r\n\t\t\tif re.findall(r'forest|Forest', obj['CLASS_NAME']) != []:\r\n\t\t\t\tforest.add(obj['VALUE'])\r\n\r\n\t\t\tif re.findall(r'corn|Corn', obj['CLASS_NAME']) != []:\r\n\t\t\t\tcorns.add(obj['VALUE'])\r\n\r\n\t\t\tif re.findall(r'soybean|Soybean', obj['CLASS_NAME']) != []:\r\n\t\t\t\tsoybeans.add(obj['VALUE'])\r\n\r\n\t\tgrass = non_empty - non_veg - corns - forest - soybeans\r\n\t\tcrop_label = ['' for i in range(256)]\r\n\t\t\r\n\t\tfor i in grass:\r\n\t\t\tcrop_label[i] = 'grass'\r\n\t\tfor i in corns:\r\n\t\t\tcrop_label[i] = 'corn'\r\n\t\tfor i in soybeans:\r\n\t\t\tcrop_label[i] = 'soybeans'\r\n\t\tfor i in forest:\r\n\t\t\tcrop_label[i] = 'forest'\r\n\r\n\t\tself.crop_label = crop_label", "def test_n_jets(self):\n tagger = Tagger(\"dummy\", output_nodes=[\"ujets\", \"cjets\", \"bjets\"])\n labels = np.concatenate([np.zeros(80), np.ones(5) * 4, np.ones(15) * 5])\n tagger.labels = np.array(labels, dtype=[(\"HadronConeExclTruthLabelID\", \"i4\")])\n with self.subTest():\n self.assertEqual(tagger.n_jets(\"ujets\"), 80)\n with self.subTest():\n self.assertEqual(tagger.n_jets(\"cjets\"), 5)\n with self.subTest():\n self.assertEqual(tagger.n_jets(\"bjets\"), 15)", "def _ncutb_seg(self, data_list):\n img = 
data_list[0]\n param = data_list[1]\n param_cut = data_list[2]\n if param_cut is None:\n threahold = 0.2\n else:\n threahold = param_cut[0]\n # Check if the param is the super pixel label or the num of super pixel\n # to be segmented\n try:\n num = int(param[0])\n # super pixel seg\n label1 = segmentation.slic(img, compactness=10, n_segments=num,\n max_iter=100, slic_zero=True)\n except:\n label1 = param\n # N-Cut\n # Edge detection\n edge = filters.sobel(color.rgb2gray(img))\n # Smooth the edge map\n edge = filters.gaussian(edge, 1)\n edge = filters.gaussian(edge, 1)\n # Reverse the energy map\n ne = edge.max() - edge\n rag = graph.rag_boundary(label1, ne)\n label2 = graph.cut_normalized(label1, rag, thresh=threahold)\n return label2", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def create_labelled_dataset(self):\n\n print(\"-------------------------------------------------------------------\")\n print(\" How to Use the Pole Hull Label Tool\")\n print(\"-------------------------------------------------------------------\")\n print(\"- If a hull is NOT associated to a pole: press the 1 button\")\n print(\"- If a hull IS associated to a pole: press the 2 button\")\n print(\"\\n- If any other key is pressed, the program EXITS\")\n print(\"-------------------------------------------------------------------\")\n\n detector = gate_detector.GateDetector(im_resize=3.0/4)\n\n imgs = []\n labels = []\n directory = os.path.dirname(os.getcwd())\n \n # Get absolute path of all images in the images folder\n for dirpath,_,filenames in os.walk(os.path.join(directory, 'images', 'gate')):\n for f in filenames:\n imgs.append(os.path.abspath(os.path.join(dirpath, f)))\n\n # Get the hulls from the segmented image and run the display and label program for each image\n for img in imgs:\n src = cv.imread(img, 1)\n pre = detector.preprocess(src)\n seg = detector.segment(pre)\n mor = detector.morphological(seg)\n hulls = detector.create_convex_hulls(seg)\n labels += self.display_and_label_hulls(hulls, pre)\n return labels", 
"def plot_various():\n load_variables(SYS_VARS.KDDCup_path_names)\n saved_preprocess = \"KDD_train_num_10.npy\"\n data = categorical_labels_conversion (SYS_VARS.KDDCup_path_train_10 , attacks_map, _ATTACK_INDEX_KDD, saved_preprocess, SYS_VARS.KDDCup_path_result, False)\n plot_attacks( attacks_data = data, a_index = _ATTACK_INDEX_KDD)\n #plot_attacks( dataset = SYS_VARS.KDDCup_path_train_10, a_index = _ATTACK_INDEX_KDD)", "def setContourLabels(mode='none', ndigits=1):\n odict = {'none':'NONE', 'float':'FLOAT', 'string':'CONLAB'}\n dislin.labdig(ndigits, 'CONTUR')\n dislin.labels(odict[mode], 'CONTUR')", "def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep", "def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')", "def LabelDisks(self):\n pass", "def make_label(descriptor: dict) -> str:\n pass", "def efficient_tagged_jets_hist(datalist,discriminant, discriminant_cut, CSV_cut, bins, Difference=False, mode=\"pT_hadron\",Save=False):\n\ttitle = \"eff_tagged_jets_vs_\"+mode\n\tAllJetsHistlist = []\n\tCSVHistlist = []\n\tDiscriminantHistlist = []\n\tif mode == \"pT_hadron\":\n\t\tfeature = 2\n\telif mode == \"pT_jet\":\n\t\tfeature = 3\n\telif mode == \"decay_vx\":\n\t\tfeature = 4\n\tfor n,data in enumerate(datalist): \n print \"working on\",data[1]\n\t\tran = data[2]\n\t\tAllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n\t\tAllJetsHistlist[n].SetLineColor(4)\n\t\tCSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n\t\tCSVHistlist[n].SetLineColor(3)\n\t\tDiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n\t\t\tAllJetsHistlist[n].Fill(particle[feature])\n\t\t\tif particle[1] >= CSV_cut: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n try:\n\t\t\t\t\tL = particle[16]/float(particle[13])\n\t\t\t\texcept ZeroDivisionError:\n\t\t\t\t\tcontinue\n\t\t\tif L >= discriminant_cut: DiscriminantHistlist[n].Fill(particle[feature])\n\tcanvaslist = []\n\tlegendlist = []\n\tTfilelist = []\n\tfor n,data in enumerate(datalist):\n\t\tcanvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n\t\tcanvaslist[n].SetTitle(data[1]+\"_\"+title)\n \trt.gStyle.SetOptStat(0)\n\t\tlegendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n\t\tlegendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n\t\tlegendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n\t\tlegendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n \tAllJetsHistlist[n].GetXaxis().SetTitle(mode)\n \tAllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n \tAllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n\t\tAllJetsHistlist[n].Draw()\n\t\tCSVHistlist[n].Draw(\"SAME\")\n\t\tDiscriminantHistlist[n].Draw(\"SAME\")\n\t\tlegendlist[n].Draw()\n\t\tif Save:\n\t\t\tcanvaslist[n].SaveAs(title+\"_\"+data[1]+discriminant+\".png\")\n\t\t\tTfilelist.append(rt.TFile(\"histogram_files/pT_hists/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n 
\tAllJetsHistlist[n].Write()\n\t\t\tCSVHistlist[n].Write()\n\t\t\tDiscriminantHistlist[n].Write()", "def addLabels(per, mask, background, colored, dilatedEroded, skeletoned, lines):\n cv2.putText(per, 'Perspective', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(mask, 'BackgroundMotionSubtraction', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(background, 'Background', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(colored, 'Yellow', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(dilatedEroded, 'dilated+eroded', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n cv2.putText(skeletoned, 'skeletoned', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n # cv2.putText(lines, 'Skeleton+HoughLines', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1, cv2.LINE_AA)\n return per, mask, background, colored, dilatedEroded, skeletoned, lines", "def split_labels(y,tX):\n jet_num_col_id = 22\n no_jet_indices = np.where(tX[:,jet_num_col_id] == 0)[0]\n one_jet_indices = np.where(tX[:,jet_num_col_id] == 1)[0]\n multi_jet_indices = np.where(tX[:,jet_num_col_id] > 1)[0]\n ###\n no_jet_y = y[no_jet_indices]\n one_jet_y = y[one_jet_indices]\n multi_jet_y = y[multi_jet_indices]\n labels_set = [no_jet_y,one_jet_y,multi_jet_y]\n return labels_set", "def add_hdv(self, ROI_id, type_hdv='cum', checkbox_mode=False):\n\n appartenance_contourage = self.dicom_navigation.slice.get_appartenance_contourage(ROI_id)\n \n contourage = Contourage_from_matrice(appartenance_contourage, ROI_id) # On crée un objet 'Contourage_from_matrice' à partir du de la matrice booléenne\n\n dose_matrix = self.dicom_navigation.slice.get_dose_matrix()\n\n # Cas ou on ajoute pour la premiere fois un contourage\n if dose_matrix is None:\n return\n \n doses = Doses_from_matrice(dose_matrix) # On crée un objet 'Doses_from_matrice' à partir de la matrice de doses mise à jour\n\n var = tk.StringVar() # À VENIR... VARIABLE D'ÉTAT QUI INDIQUE SI ON EST EN MODE 'VOLUME RELATF' OU 'VOLUME ABSOLU'. CODÉ EN DUR POUR LE MOMENT\n var.set('r')\n\n self.ddc = Doses_dans_contourage(doses, contourage) # Triage des doses qui sont dans le contourage.\n\n if self.ddc.dose_max == 0: # Si la dose max est 0, on sait qu'on est à l'extérieur de la zone réduite. 
*** \n return\n\n if not ROI_id in self.dict_graph: \n self.dict_graph[ROI_id] = {} \n self.dict_plot[ROI_id] = {} \n self.dict_doses_max[ROI_id] = {} \n if self.dicom_navigation.var_etat_abs_rel.get() == 'a':\n self.dict_volumes_max[ROI_id] = {} \n\n self.dict_doses_max[ROI_id][type_hdv] = self.ddc.dose_max\n\n ###\n\n if self.dicom_navigation.var_etat_abs_rel.get() == 'r': # si on est en mode 'volume relatif', le range des axes sera définit différemment\n facteur = 100.0/self.ddc.nb_voxels # comme l'instance 'axe_volume' créée par les classes hdv_cumulatif et hdv_differentiel contient des données en NOMBRE DE VOXELS\n # (et non en pourcentage ou en volume réel), il faut multiplier ces données par le facteur de conversion approprié (il dépend\n # de si l'on est en mode 'relatf' ou 'absolu').\n\n if self.dicom_navigation.var_etat_abs_rel.get() == 'a': # si on est en mode 'volume absolu'.\n facteur = self.ddc.v_voxel\n self.dict_volumes_max[ROI_id][type_hdv] = self.ddc.v_voxel * self.ddc.nb_voxels \n self.y_lim = get_max_2D_dic(self.dict_volumes_max)\n\n ###\n\n if type_hdv == 'cum':\n hdv = HDV_cumulatif(self.ddc, 100)\n\n if type_hdv == 'diff':\n hdv = HDV_differentiel(self.ddc, 50)\n\n\n self.dict_graph[ROI_id][type_hdv] = hdv\n self.dict_plot[ROI_id][type_hdv], = self.fig.plot(hdv.axe_doses, facteur * hdv.axe_volume)\n\n ###\n\n self.x_lim = get_max_2D_dic(self.dict_doses_max) \n\n self.fig.set_xlim([0, 1.02*self.x_lim]) # dimension de l'axe des x\n self.fig.set_ylim([0, 1.02*self.y_lim]) # dimension de l'axe des y\n\n # Contraintes\n if self.got_contraintes and type_hdv == 'cum': # 'got_contraintes' SERA INITALISÉE À 'TRUE' LORSQUE L'ON AURA RÉCUPÉRÉ LE FICHIER DE CONTRAINTES\n self.dicom_navigation.get_dicom_contraintes().verifier_contraintes_sur_une_ROI(ROI_id)\n\n # Modifier\n if checkbox_mode:\n self.refresh_HDV()", "def segGraphCuts(self):\n path, name = os.path.split(self.fName)\n pathGC = os.path.join(path, \"graphcuts\",\"GC_\" + name)\n print \"Cargando segmentacion por Graph Cuts\"\n print \" archivo: \", pathGC\n\n segGC = sitk.ReadImage(pathGC)\n segGC = segGC / 255\n\n print \"Segmentación cargada.\"\n\n self.MaskArray.append([segGC, 'GraphCuts'])", "def segmentation_figure(label, cat, segfile):\n import matplotlib.pyplot as plt\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n from grizli import utils\n \n plt.ioff()\n \n seg = pyfits.open(segfile)\n seg_data = seg[0].data\n seg_wcs = pywcs.WCS(seg[0].header)\n \n # Randomize seg to get dispersion between neighboring objects \n np.random.seed(hash(label.split('_')[0]) % (10 ** 8))\n rnd_ids = np.append([0], np.argsort(np.random.rand(len(cat)))+1)\n \n # Make cutout\n th = pyfits.open('{0}.thumb.fits'.format(label), mode='update')\n th_wcs = pywcs.WCS(th[0].header)\n blot_seg = utils.blot_nearest_exact(seg_data, seg_wcs, th_wcs, \n stepsize=-1, scale_by_pixel_area=False)\n \n rnd_seg = rnd_ids[np.cast[int](blot_seg)]*1.\n th_ids = np.unique(blot_seg)\n \n sh = th[0].data.shape\n yp, xp = np.indices(sh)\n \n thumb_height = 2.\n fig = plt.figure(figsize=[thumb_height*sh[1]/sh[0], thumb_height])\n ax = fig.add_subplot(111)\n rnd_seg[rnd_seg == 0] = np.nan\n \n ax.imshow(rnd_seg, aspect='equal', cmap='terrain_r', \n vmin=-0.05*len(cat), vmax=1.05*len(cat))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n \n ix = utils.column_values_in_list(cat['number'], th_ids) \n xc, yc = th_wcs.all_world2pix(cat['ra'][ix], cat['dec'][ix], 0)\n xc = np.clip(xc, 0.09*sh[1], 0.91*sh[1])\n yc = 
np.clip(yc, 0.08*sh[0], 0.92*sh[0])\n \n for th_id, x_i, y_i in zip(cat['number'][ix], xc, yc):\n if th_id == 0:\n continue\n \n ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='w')\n ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='k', alpha=0.95)\n \n ax.set_xlim(0, sh[1]-1)\n ax.set_ylim(0, sh[0]-1)\n ax.set_axis_off() \n \n fig.tight_layout(pad=0.01)\n fig.savefig('{0}.seg.png'.format(label))\n plt.close(fig)\n \n # Append to thumbs file\n seg_hdu = pyfits.ImageHDU(data=np.cast[int](blot_seg), name='SEG')\n if 'SEG' in th:\n th.pop('SEG')\n \n th.append(seg_hdu) \n th.writeto('{0}.thumb.fits'.format(label), overwrite=True, \n output_verify='fix')\n th.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ht label. ht cuts, and cuts on participating jets
def _make_ht_label(chain_parts): assert len(chain_parts) == 1, '_make_ht_label, no. of chain parts != 1' scenario = chain_parts[0]['hypoScenario'] assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT' arg_res = [ re.compile(r'^(?P<lo>\d*)(?P<key>ht)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>et)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>eta)(?P<hi>\d*)$'), ] defaults = { 'ht': ('0', 'inf'), 'et': ('0', 'inf'), 'eta': ('0', 'inf'), } args = _args_from_scenario(scenario) argvals = {} nargs = len(args) assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args), len(arg_res)) # obtain argument values frrom scenario while args: arg = args.pop() for r in arg_res: m = r.match(arg) if m is not None: arg_res.remove(r) gd = m.groupdict() key = gd['key'] try: lo = float(gd['lo']) except ValueError: lo = float(defaults[key][0]) argvals[key+'lo'] = lo try: hi = float(gd['hi']) except ValueError: hi = float(defaults[key][1]) argvals[key+'hi'] = hi print (argvals) assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs) print ('sent 100') result = """ ht([(%(htlo).0fht) (%(etlo).0fet) (%(etalo).0feta%(etahi).0f) ])""" % argvals print (result) return result
[ "def beginJob(self):\n \n # CUTFLOW HISTOGRAM\n self.cutflow = TH1D('cutflow','cutflow',25,0,25)\n self.cut_none = 0\n self.cut_trig = 1\n self.cut_muon = 2\n self.cut_tau = 3\n self.cut_pair = 4\n self.cutflow.GetXaxis().SetBinLabel(1+self.cut_none, \"no cut\" )\n self.cutflow.GetXaxis().SetBinLabel(1+self.cut_trig, \"trigger\" )\n self.cutflow.GetXaxis().SetBinLabel(1+self.cut_muon, \"muon\" )\n self.cutflow.GetXaxis().SetBinLabel(1+self.cut_tau, \"tau\" )\n self.cutflow.GetXaxis().SetBinLabel(1+self.cut_pair, \"pair\" )\n \n # TREE\n self.tree = TTree('tree','tree')\n self.pt_1 = np.zeros(1,dtype='f')\n self.eta_1 = np.zeros(1,dtype='f')\n self.q_1 = np.zeros(1,dtype='i')\n self.id_1 = np.zeros(1,dtype='?')\n self.iso_1 = np.zeros(1,dtype='f')\n self.pt_2 = np.zeros(1,dtype='f')\n self.eta_2 = np.zeros(1,dtype='f')\n self.q_2 = np.zeros(1,dtype='i')\n self.id_2 = np.zeros(1,dtype='i')\n self.iso_2 = np.zeros(1,dtype='f')\n self.m_vis = np.zeros(1,dtype='f')\n self.tree.Branch('pt_1', self.pt_1, 'pt_1/F' )\n self.tree.Branch('eta_1', self.eta_1, 'eta_1/F')\n self.tree.Branch('q_1', self.q_1, 'q_1/I' )\n self.tree.Branch('id_1', self.id_1, 'id_1/O' )\n self.tree.Branch('iso_1', self.iso_1, 'iso_1/F')\n self.tree.Branch('pt_2', self.pt_2, 'pt_2/F' )\n self.tree.Branch('eta_2', self.eta_2, 'eta_2/F')\n self.tree.Branch('q_2', self.q_2, 'q_2/I' )\n self.tree.Branch('id_2', self.id_2, 'id_2/I' )\n self.tree.Branch('iso_2', self.iso_2, 'iso_2/F')\n self.tree.Branch('m_vis', self.m_vis, 'm_vis/F')", "def cut_histogram(net,typec):", "def CellLabeling(self,channel,singnal,file, features,feature_channel):\n\n if self.cell_channel != None :\n\n name=\"segmentation_map\"\n\n if not os.path.exists(\"output/\"+str(file)+\"/\"+name):\n os.makedirs(\"output/\"+str(file)+\"/\"+name)\n\n\n \n for idx in range (len(self.channel_content[channel])):\n \n frame= self.channel_content[channel][idx]\n pl_.figure(figsize=(30,30))\n pl_.imshow(frame.T, cmap=\"gray\")\n \n if track_channel == \"nucl\" :\n \n for nucl in range(0,len(features[\"nucl\"][\"uid\"])):\n \n if len(features[\"nucl\"][\"uid\"][nucl])>idx :\n \n pl_.text(features[\"nucl\"][\"position\"][nucl][idx][0],features[\"nucl\"][\"position\"][nucl][idx][1],\n str(features[\"nucl\"][\"uid\"][nucl][0]+1),color=\"red\", fontsize=50)\n \n \n \n elif track_channel == \"cell\" :\n \n for cell in range(0,len(features[\"cell\"][\"uid\"])):\n \n if len(features[\"cell\"][\"uid\"][cell])> idx: #0 :\n \n pl_.text(features[\"cell\"][\"position\"][cell][idx][0],features[\"cell\"][\"position\"][cell][idx][1],\n str(features[\"cell\"][\"uid\"][cell][0]+1),color=\"red\", fontsize=50)\n \n\n\n # save figures\n \n path= \"output/\"+str(file)+\"/\"+name+\"/frame\"+str(idx)+\".jpg\"\n pl_.savefig(path)\n pl_.close()\n\n\n self.channel_content[channel]=None\n self.channel_content[\"CHERRY\"]=None", "def _ncutb_seg(self, data_list):\n img = data_list[0]\n param = data_list[1]\n param_cut = data_list[2]\n if param_cut is None:\n threahold = 0.2\n else:\n threahold = param_cut[0]\n # Check if the param is the super pixel label or the num of super pixel\n # to be segmented\n try:\n num = int(param[0])\n # super pixel seg\n label1 = segmentation.slic(img, compactness=10, n_segments=num,\n max_iter=100, slic_zero=True)\n except:\n label1 = param\n # N-Cut\n # Edge detection\n edge = filters.sobel(color.rgb2gray(img))\n # Smooth the edge map\n edge = filters.gaussian(edge, 1)\n edge = filters.gaussian(edge, 1)\n # Reverse the energy map\n ne = edge.max() - edge\n rag = 
graph.rag_boundary(label1, ne)\n label2 = graph.cut_normalized(label1, rag, thresh=threahold)\n return label2", "def kinematic_cut(h277, index):\n\t# halo stars have a decomp index of 2\n\treturn h277[1].data[\"decomp\"][index] in [1, 3, 4, 5]", "def odemis_to_hyperspy(filename='sampledata/cltest.h5',specbin=1) :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition2//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition2/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n cltype = None\r\n if 'Spectrum' in cdesc :\r\n cltype = 'spectrum'\r\n elif 'CL intensity' in cdesc:\r\n cltype = 'panchrom'\r\n\r\n print('<' + filename + '> original shape :' ,x.shape, cltype)\r\n\r\n # strip unused dimensions and transpose/ reverse index order\r\n if cltype == 'panchrom' :\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n # just an image..\r\n else :\r\n xx=x[:,0,0,:,:].transpose((2,1,0))\r\n\r\n if cltype == 'spectrum' :\r\n #interpolate data to linearize the wavelength scale\r\n w = f[shome + 'DimensionScaleC'].value *1e9\r\n wx = np.linspace(w.min(),w.max(),w.size)\r\n for i in np.arange(xx.shape[0]) :\r\n for k in np.arange(xx.shape[1]) :\r\n xx[i,k,:] = np.interp(wx,w,xx[i,k,:])\r\n\r\n wslope = wx[1]-wx[0]\r\n woffset = wx.min()\r\n #wx = np.arange(w.size)\r\n #wslope,woffset=np.polyfit(wx,w,1)\r\n s = hs.signals.Signal1D(xx)\r\n\r\n elif cltype == 'panchrom' :\r\n s = hs.signals.Signal2D(xx)\r\n else :\r\n print('unknown type')\r\n\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n if cltype == 'spectrum' :\r\n s.axes_manager[2].name = 'wavelength'\r\n s.axes_manager[2].units = 'nm'\r\n s.axes_manager[2].offset = woffset\r\n s.axes_manager[2].scale = wslope\r\n s.metadata.signal_type = 'CL'\r\n\r\n f.close()\r\n if (specbin > 1) and (cltype == 'spectrum'):\r\n return( s.rebin(scale=[1,1,specbin]) )\r\n else :\r\n return( s )\r\n #end odemis_to_hyperspy\r\n #######################\r", "def cut(S, T, graph):\n ###TODO\n pass", "def applied_watershed_model(self) -> sitk.Image :\n new_coordonates = []\n pet_spacing = self.pet_img.GetSpacing()\n volume_voxel = pet_spacing[0] * pet_spacing[1] * pet_spacing[2] * 10**(-3) #in ml \n labels = np.arange(1, self.number_of_cc+1, 1)\n for label in labels: \n pixel_roi = int(len(np.where(self.labelled_array == label)[0]))\n volume_roi = pixel_roi * volume_voxel \n if volume_roi < int(30) : \n new_coordonates.append(np.where(self.labelled_array == label))\n else : \n suv_values= self.get_suv_values_matrix(label)\n localMax = self.get_local_peak(suv_values) \n marker_array, num_features = self.define_marker_array(localMax)\n if num_features != 0 : \n new_label_mask = watershed(image=-suv_values, markers=marker_array, mask=suv_values)\n new_label_mask = new_label_mask.astype(np.uint8)\n for new_label in range(1, num_features + 1):\n if len(np.where(new_label_mask == new_label)[0]) != 0 : \n new_coordonates.append(np.where(new_label_mask == new_label))\n \n 
number_total_of_label = len(new_coordonates)\n watershed_array = np.zeros(self.binary_array.shape)\n for coordonate, label in zip(new_coordonates, np.arange(1, number_total_of_label+1, 1)):\n watershed_array[coordonate] = label \n pet_spacing = self.pet_img.GetSpacing()\n pet_direction = self.pet_img.GetDirection()\n pet_origin = self.pet_img.GetOrigin()\n\n watershed_img = sitk.GetImageFromArray(watershed_array)\n watershed_img.SetSpacing(pet_spacing)\n watershed_img.SetOrigin(pet_origin)\n watershed_img.SetDirection(pet_direction)\n \n return watershed_img", "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def cut(self):\n\t\treturn None", "def headpass1( igen, node ):", "def _ncut_seg(self, data_list):\n img = data_list[0]\n param = data_list[1]\n param_cut = data_list[2]\n if param_cut is None:\n threahold = 0.001\n else:\n threahold = param_cut[0]\n # Check if the param is the super pixel label or the num of super pixel\n # to be segmented\n try:\n num = int(param[0])\n # super pixel seg\n label1 = segmentation.slic(img, compactness=10, n_segments=num,\n slic_zero=True)\n except:\n label1 = param\n # N-Cut\n g = graph.rag_mean_color(img, label1, mode='similarity')\n try:\n label2 = graph.cut_normalized(label1, g, thresh=threahold)\n except:\n log.error('\\033[01;31mERROR\\033[0m: Unknow Error in cut_normalized \\\nfunction.')\n label2 = np.zeros(label1.shape).astype('int')\n return label2", "def hxlcut():\n run_script(hxlcut_main)", "def ThinPartThickness():", "def split_labels(y,tX):\n jet_num_col_id = 22\n no_jet_indices = np.where(tX[:,jet_num_col_id] == 0)[0]\n one_jet_indices = np.where(tX[:,jet_num_col_id] == 1)[0]\n multi_jet_indices = np.where(tX[:,jet_num_col_id] > 1)[0]\n ###\n no_jet_y = y[no_jet_indices]\n one_jet_y = y[one_jet_indices]\n multi_jet_y = y[multi_jet_indices]\n labels_set = [no_jet_y,one_jet_y,multi_jet_y]\n return labels_set", "def write_label_ps(header_lines, base_lines, tail_lines, shape_list, title, outFn, cutofflist=[0.3,0.5,0.7], mode='fill'):\n OUT = open(outFn, \"w\")\n for header_line in header_lines:\n if r'{title}' in header_line:\n header_line = header_line.format(title=title)\n OUT.writelines(header_line)\n #print(len(shape_list), len())\n for shape,base_line in zip(shape_list,base_lines):\n if mode=='label':\n OUT.writelines( _color_command_segmented(shape, cutofflist)+\"\\n\" )\n elif mode=='heatmap':\n OUT.writelines( _color_command_heatmap(shape, Gradient_Colors, 0, 1)+\"\\n\" )\n else:\n raise RuntimeError(\"Sorry: mode='fill' Not applicant now\")\n OUT.writelines(base_line)\n for tail_line in tail_lines:\n OUT.writelines(tail_line)\n OUT.close()", "def __get_ohe_label__(self, label_idx) -> List[int]:\n\n label = [0] * self.n_classes\n label[label_idx] = 1\n\n return label", "def hider(self, dimension, last, i, j):\r\n \r\n # there is some api confusion about the last row or column.\r\n # The information on last is not always reliable, so we try\r\n # to catch this and use another guess\r\n # The hide with the wrong level doesn't always raise an exception\r\n # so now we just try both. 
This should work in both older and\r\n # newer Statistics versions.\r\n # changed to only try the second setting if an exception is raised.\r\n \r\n if dimension == \"columns\":\r\n hideloc = max(i, last)\r\n try:\r\n self.labels.HideLabelsWithDataAt(hideloc, j)\r\n except:\r\n self.labels.HideLabelsWithDataAt(hideloc-2, j)\r\n #except:\r\n #pass\r\n else:\r\n hideloc = max(j, last)\r\n try:\r\n self.labels.HideLabelsWithDataAt(i,hideloc)\r\n except:\r\n self.labels.HideLabelsWithDataAt(i,hideloc-2)\r\n #except:\r\n #pass \r", "def cut(self,cell):\r\n self.grid[cell[0]][cell[1]] = 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make test label for combinations helper with two simple children.
def _make_combinationsTest_label(chain_parts): assert len(chain_parts) == 1 scenario = chain_parts[0]['hypoScenario'] assert scenario == 'combinationsTest' return """ combgen( [(2)(20et, 0eta320)] simple([(40et, 0eta320) (50et, 0eta320)]) simple([(35et, 0eta240) (55et, 0eta240)]) )"""
[ "def test_many_bdd_labels_for_one_function():\n pass", "def test_recipe_nutrition_label_widget(self):\n pass", "def test_select_label(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Test Section 1\", story=story, layout=layout)\n section2 = create_section(title=\"Test Section 2\", story=story, layout=layout)\n form = SectionRelationAdminForm()\n choices_list = list(form.fields['parent'].widget.choices)\n self.assertIn(story.title, choices_list[1][1])\n self.assertIn(story.title, choices_list[2][1])", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test_single_feature_label():\n pass", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def testLabelFlatten(self):\n\n self.assertEqual(self.labels(\"This is the best sentence ever\", [\"positive\", \"negative\"], flatten=True)[0], \"positive\")", "def testLabel(self):\n\n self.assertEqual(self.labels(\"This is the best sentence ever\", [\"positive\", \"negative\"])[0][0], 0)", "def testLabelBatch(self):\n\n results = [l[0][0] for l in self.labels([\"This is the best sentence ever\", \"This is terrible\"], [\"positive\", \"negative\"])]\n self.assertEqual(results, [0, 1])", "def test(self, test, test_labels):", "def test_single_story_label():\n pass", "def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, None)\n _G_label_3\n \"\"\"))", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def test_correct(self):\n for label, kitkat in ('A', kitkatA), ('B', kitkatB):\n with self.subTest(version=label):\n self.assertEqual(list(kitkat()), kitkat_test_sequence)", "def test_general_subset_level():\n pass", "def tests_ti_document_add_label(self):\n super().group_add_label()", "def _generateLabelAndName(self, obj, **args):\n result = []\n label = self._generateLabel(obj, **args)\n name = self._generateName(obj, **args)\n result.extend(label)\n if not len(label):\n result.extend(name)\n elif len(name) and name[0].strip() != label[0].strip():\n result.extend(name)\n return result", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def test_defect_with_label_2(self):\n self.site.defects[0].label = 'foo'\n self.site.defects[1].label = 'bar'\n with self.assertRaises(LabelError):\n self.site.defect_with_label('banana')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Set, float, float, int, str) > list Filters a set of Products according to the parameters. This function is responsible for determining whether filtering with tags should be applied or not.
def get_matching_products(products, lat, lng, radius, tags): if tags: tag_list = tags.split(',') return list([ product for product in products if is_matching_product_with_tags( product, lat, lng, radius, tag_list ) ]) else: return list([ product for product in products if is_matching_product( product, lat, lng, radius ) ])
[ "def test_filter_by_product(self):\r\n mozlogger.info(\"test_filter_by_product\")\r\n\r\n # create fixtures\r\n product1 = self.F.ProductFactory()\r\n tag1 = self.factory\r\n tag1.product = product1\r\n tag1.save()\r\n tag2 = self.factory\r\n tag2.product = product1\r\n tag2.save()\r\n tag3 = self.factory\r\n\r\n # do test\r\n self._test_filter_list_by(u'product', str(product1.id), 2)\r\n self._test_filter_list_by(u'product', None, 1)", "def test_filter_by_product(self):\r\n p = self.F.ProductFactory.create()\r\n self.factory.create(name=\"Tag 1\", product=p)\r\n self.factory.create(name=\"Tag 2\")\r\n\r\n res = self.get(params={\"filter-product\": str(p.id)})\r\n\r\n self.assertInList(res, \"Tag 1\")\r\n self.assertNotInList(res, \"Tag 2\")", "def test_filter(self):\n fs = ProductFilterSet(data={'description': 'apple'})\n filtered_qs = fs.filter(self.qs)\n for product in filtered_qs:\n self.assertTrue('apple' in product.description.lower())", "def filter(self, filters):", "def filter_products(\n lat=None, lng=None, radius=None, tags=tuple(), count=10, **kwargs):\n filtered_products = current_app.products[:count]\n check_distance = lat and lng and radius\n\n shop_ids = set()\n for tag in tags:\n shop_ids = shop_ids.union(current_app.tags.get(tag, set()))\n\n if check_distance:\n def distance_checker(shop):\n return distance(shop.lat, shop.lng, lat, lng) < radius\n filtered_shops = filter(distance_checker, current_app.shops)\n distance_shop_ids = map(lambda shop: shop.id, filtered_shops)\n if tags:\n shop_ids = shop_ids.intersection(distance_shop_ids)\n else:\n shop_ids = distance_shop_ids\n\n if check_distance or tags:\n filtered_products = filter_and_slice_products(shop_ids, count)\n return filtered_products", "def _filter_valid_products(products_metadata: List, minio_client: MinioConnection):\n is_valid = []\n bucket_products = settings.MINIO_BUCKET_NAME_PRODUCTS\n for product_metadata in products_metadata:\n (rasters_paths, is_band) = _get_product_rasters_paths(\n product_metadata, minio_client, bucket_products\n )\n rasters_paths = list(compress(rasters_paths, is_band))\n sample_band_path = rasters_paths[0]\n sample_band_filename = _get_raster_filename_from_path(sample_band_path)\n file_path = str(\n Path(settings.TMP_DIR, product_metadata[\"title\"], sample_band_filename)\n )\n minio_client.fget_object(bucket_products, sample_band_path, file_path)\n sample_band = _read_raster(file_path)\n num_pixels = np.product(sample_band.shape)\n num_nans = np.isnan(sample_band).sum()\n nan_percentage = num_nans / num_pixels\n is_valid.append(nan_percentage < 0.2)\n products_metadata = list(compress(products_metadata, is_valid))\n return products_metadata", "def test_tag_filter(self):\n request = RequestFactory().get('/?tags=foo&tags=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['tags__slug__in'], ['foo', 'bar'])", "def create_pool_filter(tags, tags_intersect):\n\n filters = []\n if tags_intersect:\n filters.append(all_tag_filter(tags_intersect))\n elif tags:\n filters.append(or_tag_filter(tags))\n return concat_filters(filters)", "def all_tag_filter(tags):\n\n if not isinstance(tags, list):\n tags = [tags]\n if len(tags) == 1:\n return Filters.equal(\"Tags\", tags[0])\n tag_selector = Filters.node_and([Filters.equal(\"Tags\", tag_value) for tag_value in tags])\n return tag_selector", "def prod_filter_type(prod_lst = [], ctgry = [], \\\n price_rg = [0, 99999999], db_name = \"database.json\"):\n dbs = db.load_json(db_name)\n all_prod = 
prod_lst if prod_lst != [] else list(dbs[\"PRODUCT_DB\"].keys())\n # filter catagory\n rt1 = []\n rt2 = []\n # if category is chosen\n if ctgry != [] or ctgry != None:\n for prod in all_prod:\n prod_summ = 1\n # for all choson category, product has a positive direction on that category\n for i in range(len(ctgry)):\n if ctgry[i] > 0:\n prod_summ *= ctgry[i] * dbs[\"PRODUCT_DB\"][prod][\"category\"][i]\n if prod_summ > 0:\n rt1.append(prod)\n else:\n rt1 = all_prod\n # filter price\n rt2 = []\n for prod in rt1:\n if int(dbs[\"PRODUCT_DB\"][prod][\"price\"]) >= int(price_rg[0]) \\\n and int(dbs[\"PRODUCT_DB\"][prod][\"price\"]) <= int(price_rg[-1]):\n rt2.append(prod)\n return rt2", "def test_filter_by_product(self):\r\n one = self.factory.create(name=\"Foo 1\")\r\n self.factory.create(name=\"Foo 2\")\r\n\r\n res = self.get(\r\n params={\"filter-product\": str(one.product.id)})\r\n\r\n self.assertInList(res, \"Foo 1\")\r\n self.assertNotInList(res, \"Foo 2\")", "def filter_queryset(self, request, queryset, view):\n # filter by tags if available.\n tags = request.query_params.get(\"tags\", None)\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n return queryset.filter(tags__name__in=tags)\n\n return queryset", "def categorizeAndExcludeProducts(productList):\n\tprint('called categorizeAndExcludeProducts()')\n\n\tfor product in productList:\n\t\tproduct.isExcluded = False\n\n\t\t# split each product name into words and search for keywords\n\t\tprodName = product.name.split(' ')\n\t\tfor word in prodName:\n\t\t\tword = word.lower()\n\n\t\t\t# flag multi-pack items\n\t\t\tif word == 'pack.':\n\t\t\t\tproduct.isMultiPack = True\n\n\t\t\t# flag items to be excluded\n\t\t\tfor category in ExcludedCategories:\n\t\t\t\tif word == category:\n\t\t\t\t\tproduct.isExcluded = True\n\n\t\t# flag single-pack items\n\t\tif not product.isMultiPack:\n\t\t\tproduct.isSinglePack = True\n\n\t\t# move all remaining items into one of three lists: SinglePackProducts, MultiPackProducts, or ExcludedProducts\n\t\tif product.isExcluded:\n\t\t\tExcludedProducts.append(product)\t\t\t# products excluded by category\n\t\telif product.isSinglePack and singlePacks.willModify:\n\t\t\tSinglePackProducts.append(product)\n\t\telif product.isMultiPack and multiPacks.willModify:\n\t\t\tMultiPackProducts.append(product)\n\t\telse:\n\t\t\tExcludedProducts.append(product)\t\t\t# products excluded by quantity", "async def _filter_experiments(self, experiments, tags, qtype):\n if len(tags) > 0:\n filtered_experiments = []\n [\n filtered_experiments.append(e)\n for e in experiments\n if has_tag_requirements(await e.tags, tags, qtype)\n ]\n self._experiments = filtered_experiments\n else:\n self._experiments = experiments", "def test_filter_remove_only_bad_products(self):\n list_of_products = [self.good_product, self.bad_product]\n self.assertEqual(\n ProductValidator().filter(list_of_products),\n [self.good_product])", "def create_job_filter(tags, tags_intersect):\n filters = []\n if tags_intersect:\n filters.append(all_tag_filter(tags_intersect))\n elif tags:\n filters.append(or_tag_filter(tags))\n return concat_filters(filters)", "def get_filtered_products(**kwargs):\n \n response = []\n try:\n engine = create_db_engine()\n db_conn = engine.connect()\n with db_conn as connection:\n rows = connection.execute(\"select * from products\")\n response_list = []\n for row in rows:\n res = {}\n loc = dict(row)\n res['id'] = loc.get('id')\n res['product_id'] = loc.get('product_id')\n res['product_name'] = 
loc.get('product_name')\n res['mrp'] = loc.get('mrp')\n res['units'] = loc.get('units')\n res['purchase_price'] = loc.get('purchase_price')\n res['selling_price'] = loc.get('selling_price')\n res['stock'] = loc.get('stock')\n res['quantity'] = loc.get('quantity')\n \n response_list.append(res)\n return response_list\n \n #response = generate_api_response(rows)\n db_conn.close() \n except Exception as e:\n print(\"get products data engne exce---*******************\"+str(e))\n raise ValueError", "def filter_params(self, parameters: List[Dict]):\n pgs_filterd = []\n\n for group in parameters:\n if group[\"params\"] == []:\n pass\n else:\n pgs_filterd += [group]\n return pgs_filterd", "def search_by_product(self,\n **query: QueryField\n ) -> Iterable[Tuple[Iterable[Dataset], Product]]:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Product, float, float, radius) > boolean Check if the coordinates of a shop are within a radius (in meters) using Vincenty's formulae.
def is_matching_product(product, lat, lng, radius): return vincenty( (lat, lng), (product.shop.lat, product.shop.lng) ).meters <= radius
[ "def is_matching_product_with_tags(product, lat, lng, radius, tags):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius and any(tag in product.shop.tags for tag in tags)", "def in_roi(x_y, w_h, center, radius):\n x, y = x_y\n w, h = w_h\n vertices = [(x,y), (x+w,y), (x+w,y+h), (x,y+h)]\n for vertex in vertices:\n distance_square = (vertex[0] - center[0])**2 + (vertex[1] - center[1])**2\n if distance_square > radius**2:\n return False\n return True", "def is_on_sphere(c, r, p):\n return np.isclose(distance(c, p), r)", "def is_inside(self,x_point:float, y_point:float, z_point:float) -> bool:\n mid_to_point = Shape.eu_dis(self.x, x_point, self.y, y_point, self.z, z_point)\n return mid_to_point <= self.radius", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def _in_unit_circle(x, y):\n\n if math.sqrt(x**2 + y**2) <= 1:\n return True\n else:\n return False", "def isinside(x, y, mu, C, axisfactor=3.44):\n xnew, ynew = mapunit(x, y, mu, C, axisfactor)\n rsqr = xnew**2 + ynew**2\n return rsqr < 1.0", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def __contains__(self, point):\n\t\treturn self.distance(point)[0] <= self.radius", "def contains(self, loc): \n return loc.distance(self.center) <= self.radius", "def is_point_in_circle(r, p):\n return r >= math.hypot(*p)", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r", "def test_vertices_distance_from_center(self):\n for vertex in self.vertices:\n actual_distance = vincenty((self.latitude, self.longitude), (vertex[0], vertex[1])).meters\n assert_almost_equal(actual_distance, self.radius_meters, 5)", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def sphere_isclose(c1, c2, *args, **kwargs):\n return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose(\n c1.center, c2.center, *args, **kwargs\n )", "def point_in_circle(circle,point):\n if (point.x**2 + point.y**2 == circle.radius**2):\n return True\n else :\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Product, float, float, float, list) -> boolean. Check whether the coordinates of a shop are within a radius (in meters) using Vincenty's formulae and whether the shop contains any of the tags provided.
def is_matching_product_with_tags(product, lat, lng, radius, tags):
    return vincenty(
        (lat, lng),
        (product.shop.lat, product.shop.lng)
    ).meters <= radius and any(tag in product.shop.tags for tag in tags)
[ "def is_matching_product(product, lat, lng, radius):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius", "def shops_within_radius(self, lat, lng, radius, tags=None):\n center_point = geoindex.GeoPoint(lat, lng)\n points = self.geoindex.get_nearest_points(center_point, radius, 'km')\n\n def tags_filter(shops):\n for shop in shops:\n for tag in tags:\n if tag in shop['tags']:\n yield shop\n break\n\n def get_shops():\n for point, distance in points:\n point.ref['distance'] = distance\n yield point.ref\n\n if tags:\n return tags_filter(get_shops())\n else:\n return get_shops()", "def in_roi(x_y, w_h, center, radius):\n x, y = x_y\n w, h = w_h\n vertices = [(x,y), (x+w,y), (x+w,y+h), (x,y+h)]\n for vertex in vertices:\n distance_square = (vertex[0] - center[0])**2 + (vertex[1] - center[1])**2\n if distance_square > radius**2:\n return False\n return True", "def get_matching_products(products, lat, lng, radius, tags):\n if tags:\n tag_list = tags.split(',')\n return list([\n product for product in products\n if is_matching_product_with_tags(\n product,\n lat,\n lng,\n radius,\n tag_list\n )\n ])\n else:\n return list([\n product for product in products\n if is_matching_product(\n product,\n lat,\n lng,\n radius\n )\n ])", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def filter_products(\n lat=None, lng=None, radius=None, tags=tuple(), count=10, **kwargs):\n filtered_products = current_app.products[:count]\n check_distance = lat and lng and radius\n\n shop_ids = set()\n for tag in tags:\n shop_ids = shop_ids.union(current_app.tags.get(tag, set()))\n\n if check_distance:\n def distance_checker(shop):\n return distance(shop.lat, shop.lng, lat, lng) < radius\n filtered_shops = filter(distance_checker, current_app.shops)\n distance_shop_ids = map(lambda shop: shop.id, filtered_shops)\n if tags:\n shop_ids = shop_ids.intersection(distance_shop_ids)\n else:\n shop_ids = distance_shop_ids\n\n if check_distance or tags:\n filtered_products = filter_and_slice_products(shop_ids, count)\n return filtered_products", "def contains(self, loc): \n return loc.distance(self.center) <= self.radius", "def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...", "def is_on_sphere(c, r, p):\n return np.isclose(distance(c, p), r)", "def __contains__(self, point):\n\t\treturn self.distance(point)[0] <= self.radius", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def is_inside(self,x_point:float, y_point:float, z_point:float) -> bool:\n mid_to_point = Shape.eu_dis(self.x, x_point, self.y, y_point, self.z, z_point)\n return mid_to_point <= self.radius", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 
2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r", "def isNear(self, *args):\n return _almathswig.PositionAndVelocity_isNear(self, *args)", "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def contains(self, point, margin=OBSTACLE_MARGIN):\n p1 = copy.copy(self.__p)\n p2 = copy.copy(point)\n if self.__axis == 'x':\n p1.x = 0\n p2.x = 0\n elif self.__axis == 'y':\n p1.y = 0\n p2.y = 0\n else:\n p1.z = 0\n p2.z = 0\n\n dist = hypot(p1.x-p2.x, hypot(p1.y-p2.y, p1.z-p2.z))\n return dist < self.__radius + margin" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a copy of a facemap proc file, but pointing to a new video. By default, the new proc file is created in the same folder as the new videofile and named videofile_proc.npy.
def copy_facemap_roi(procfile, videofile, outputfile=None):
    videodata = np.load(procfile, allow_pickle=True).item()
    videodata['filenames'] = [[videofile]]
    if outputfile is None:
        outputfile = os.path.splitext(videofile)[0]+'_proc.npy'
    if os.path.isfile(outputfile):
        print(f'File {outputfile} exists. It will not be overwritten.')
        return None
    np.save(outputfile, videodata)
    return outputfile
[ "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... 
but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def process_video(self, tmp_output_folder, video_name, video_num, total_videos):\n vidcap = cv2.VideoCapture(join(tmp_output_folder, video_name))\n print(f\"Processing video {video_num}/{total_videos} with name {video_name} \\n\")\n\n input_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vidcap.get(cv2.CAP_PROP_FPS))\n\n metadata = []\n faces_all_frames = []\n success, image = vidcap.read()\n count = 0\n frame = 0\n while success:\n if count % self.sample_every == 0:\n height, width = image.shape[:2]\n image = cv2.resize(image, (self.width, self.height), interpolation=cv2.INTER_CUBIC)\n\n # Convert from BGR color (OpenCV) to RGB color (face_recognition)\n rgb_image = image[:, :, ::-1]\n\n # Find all the faces in the current frame of video\n face_locations = face_recognition.face_locations(rgb_image)\n faces = []\n face_num = 0\n # Display the results\n for top, right, bottom, left in face_locations:\n # Draw a box around the face\n faces.append(image[top:bottom, left:right, :].copy())\n metadata.append(\n f\"{video_name},frame-{count}.face-{face_num}.jpg,{count},{face_num},{input_length},{fps},{frame_width},{frame_height},{top},{right},{bottom},{left}\\n\")\n face_num += 1\n faces_all_frames.append(faces)\n\n frame += 1\n success, image = vidcap.read()\n count += 1\n video_num += 1\n vidcap.release()\n\n with open(f\"{self.output_folder}/faces-pickle/{video_name}.pkl\", \"wb\") as f_out:\n pickle.dump(faces_all_frames, f_out)\n return metadata", "def make_video(self,namein,nameout):\n import os\n os.system(f'ffmpeg -framerate 24 -pattern_type glob -i \"{namein}*.png\" {self.respath}/{self.date}/{nameout}.mp4')", "def generate_vis_video(file_path, folder_path, op):\n if which('ffmpeg') is None:\n raise Exception('No ffmpeg found in path')\n if op == mv_str:\n cmd = 'ffmpeg -flags2 +export_mvs -export_side_data +venc_params -i ' + file_path + ' -vf codecview=mv=pf+bf+bb -y'\n else:\n cmd = 'ffmpeg -export_side_data +venc_params -i ' + file_path + ' -vf codecview=' + op + '=true -y'\n args = shlex.split(cmd)\n args.append(folder_path + '/report/' + op + '_vis.mp4')\n proc = subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n proc.communicate()\n if proc.returncode != 0:\n raise Exception(op + ' video generates failed, please check the version of your ffmpeg')\n print(op + ' visualization video generated')", "def convert_video(video_file, output_file_name):\n video_stream = cv2.VideoCapture(video_file)\n total_frames = video_stream.get(cv2.CAP_PROP_FRAME_COUNT)\n background = get_median_frame(video_stream)\n video_stream.release()\n #reopen for processing:\n video_stream = cv2.VideoCapture(video_file)\n #ready an output writer\n writer = cv2.VideoWriter(output_file_name, \n cv2.VideoWriter_fourcc(*\"MP4V\"), fps,(1080,1920)) #(1920,1080))\n frameCnt=0\n pos = [] #Array for the coordinates\n while(frameCnt < total_frames-1):\n frameCnt+=1\n ret, frame = video_stream.read()\n dframe = 
background_subtraction(frame,background)\n cnts = find_contours(dframe)\n x,y = find_lowest_contour(cnts)\n pos.append([x,y])\n if len(pos): \n cv2.polylines(frame,np.int32([pos]),False,(0, 255, 0),2)\n writer.write(cv2.resize(frame, (1080,1920))) ## size probably shoudn't be fixed.\n writer.release()\n video_stream.release()\n return pos", "def createvideo(temp, videoname, ips):\n fig = plt.figure()\n grid = empty_board(False)\n ani = FuncAnimation(fig, creation, frames = temp)\n writervideo = FFMpegWriter(fps = ips)\n ani.save(videoname, writer = writervideo)\n return", "def local_video(**kwargs):\n output_dir = run_video_preprocess(\n video_file=input_video,\n roi_locations=kwargs[\"roi_locations\"],\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n database=False\n )\n\n run_analysis_pipeline(\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n json_filepath=output_dir,\n )", "def og_load_driving_agent_make_video(pelican_agent_filepath, pelican_agent_name, panther_agent_filepath, panther_agent_name, config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',video_path='/Components/plark_ai_flask/builtangularSite/dist/assets/videos'):\n logger.info(\"Load driving agent make viedo - pelican agent filepast = \" + pelican_agent_filepath)\n basicdate = str(datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n video_file = basicdate+'.mp4' \n video_file_path = os.path.join(video_path, video_file) \n os.makedirs(video_path, exist_ok=True)\n files = os.listdir(pelican_agent_filepath)\n if len(files) > 0:\n for f in files:\n if '.zip' in f:\n # load model\n metadata_filepath = os.path.join(pelican_agent_filepath, 'metadata.json')\n agent_filepath = os.path.join(pelican_agent_filepath, f)\n \n\n with open(metadata_filepath) as f:\n metadata = json.load(f)\n logger.info('Playing against:'+agent_filepath) \n if metadata['agentplayer'] == 'pelican': \n pelican_agent = Pelican_Agent_Load_Agent(agent_filepath, metadata['algorithm'])\n pelican_model = pelican_agent.model\n\n env = plark_env.PlarkEnv(driving_agent='pelican',panther_agent_filepath=panther_agent_filepath, panther_agent_name=panther_agent_name, config_file_path=config_file_path)\n basewidth,hsize = make_video(pelican_model,env,video_file_path)\n logger.info(\"This is the environment variable \" + str(env))\n\n elif metadata['agentplayer'] == 'panther':\n raise ValueError('No Pelican agent found in ', pelican_agent_filepath) \n \n else:\n raise ValueError('no agent found in ', files)\n\n return video_file, env.status,video_file_path", "def process_video(self):\n out_clip = self.clip.fl_image(self.process_image_showlane)\n out_clip.write_videofile(self.output, audio=False)", "def video(vae, vidpath, imgsize=28, frames=192, fps=24, n=30, fgisize=15, useaxes=True):\n increments = np.linspace(-1, 1, frames)\n images = [ ]\n i = 0\n for increment in increments:\n img = latent_space_grid(vae, increment, imgsize, n, fgisize, useaxes)\n images.append(img)\n i += 1\n print(f'Rendered frame {i}')\n imgs = [cv2.imread(img) for img in images]\n height, width, _ = imgs[0].shape\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n video = cv2.VideoWriter(vidpath, fourcc, fps, (width, height))\n for img in imgs:\n video.write(img)\n\n cv2.destroyAllWindows()\n video.release()", "def analyze_video(vidNum_iter, config, pointInds_toUse, pts_spaced, session): # function needed for multiprocessing\n\n optic = config['Optic']\n\n numVids = session['num_vids']\n path_vid_allFiles = session['videos']\n lk_names = [key for key in 
optic.keys() if 'lk_' in key]\n lk_params = {k.split('lk_')[1]: (tuple(optic[k]) if type(optic[k]) is list else optic[k]) \\\n for k in lk_names}\n\n vid = imageio.get_reader(path_vid_allFiles[vidNum_iter], 'ffmpeg')\n # metadata = vid.get_meta_data()\n\n path_vid = path_vid_allFiles[vidNum_iter] # get path of the current vid\n video = cv2.VideoCapture(path_vid) # open the video object with openCV\n numFrames = int(video.get(\n cv2.CAP_PROP_FRAME_COUNT)) # get frame count of this vid GENERALLY INACCURATE. OFF BY AROUND -25 frames\n\n frameToSet = 0\n frame = vid.get_data(\n frameToSet) # Get a single frame to use as the first 'previous frame' in calculating optic flow\n new_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n old_frame = new_frame_gray\n\n displacements_tmp = np.zeros((pts_spaced.shape[0], 2, np.uint64(numFrames + (numVids * 1000)))) * np.nan\n\n print(' ', end='', flush=True)\n text = \"progresser #{}\".format(vidNum_iter)\n print(f'\\n Calculating displacement field: video # {vidNum_iter + 1}/{numVids}')\n\n for iter_frame, new_frame in enumerate(tqdm(vid, total=numFrames, desc=text, position=vidNum_iter)):\n new_frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n ##calculate optical flow\n pointInds_new, status, error = cv2.calcOpticalFlowPyrLK(old_frame, new_frame_gray, pointInds_toUse, None,\n **lk_params) # Calculate displacement distance between STATIC/ANCHORED points and the calculated new points. Also note the excluded 'NextPts' parameter. Could be used for fancier tracking\n\n ## Calculate displacement and place into variable 'displacements' (changes in size every iter)\n if iter_frame == 0:\n displacements_tmp[:, :, iter_frame] = np.zeros((pts_spaced.shape[0], 2))\n else:\n displacements_tmp[:, :, iter_frame] = np.single(np.squeeze((\n pointInds_new - pointInds_toUse))) # this is the important variable. 
Simply the difference in the estimate\n\n old_frame = new_frame_gray # make current frame the 'old_frame' for the next iteration\n\n return displacements_tmp", "def __create_blank_video(self):\n video_meta_data = get_video_meta_data(list(self.frame_types.values())[0])\n if self.gpu:\n cmd = 'ffmpeg -y -t {} -f lavfi -i color=c=black:s={}x{} -c:v h264_nvenc -preset slow -tune stillimage -pix_fmt yuv420p \"{}\" -hide_banner -loglevel error -y'.format(\n str(video_meta_data[\"video_length_s\"]),\n str(self.video_width),\n str(self.video_height),\n self.blank_path,\n )\n else:\n cmd = 'ffmpeg -y -t {} -f lavfi -i color=c=black:s={}x{} -c:v libx264 -tune stillimage -pix_fmt yuv420p \"{}\" -hide_banner -loglevel error -y'.format(\n str(video_meta_data[\"video_length_s\"]),\n str(self.video_width),\n str(self.video_height),\n self.blank_path,\n )\n subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)\n self.frame_types[\"blank\"] = self.blank_path", "def load_driving_agent_make_video(pelican_agent_filepath, pelican_agent_name, panther_agent_filepath, panther_agent_name, config_file_path='/Components/plark-game/plark_game/game_config/10x10/balanced.json',video_path='/Components/plark_ai_flask/builtangularSite/dist/assets/videos',basic_agents_filepath='/Components/plark-game/plark_game/agents/basic', renderWidth=None, renderHeight=None):\n basicdate = str(datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n video_file = basicdate+'.mp4' \n video_file_path = os.path.join(video_path, video_file) \n os.makedirs(video_path, exist_ok=True)\n\n kwargs = {\n 'driving_agent': \"pelican\",\n 'panther_agent_filepath': panther_agent_filepath,\n 'panther_agent_name': panther_agent_name,\n }\n\n game_env = Environment()\n game_env.createNewGame(config_file_path, **kwargs)\n game = game_env.activeGames[len(game_env.activeGames)-1]\n \n agent = classes.load_agent(pelican_agent_filepath,pelican_agent_name,basic_agents_filepath,game,**kwargs)\n\n if renderHeight is None:\n renderHeight = game.pelican_parameters['render_height']\n if renderHeight is None:\n renderWidth = game.pelican_parameters['render_width']\n \n basewidth, hsize = new_make_video(agent, game, video_file_path, renderWidth, renderHeight)\n\n return video_file, game.gameState ,video_file_path", "def annotate_video(input_file, output_file):\n\tvideo = VideoFileClip(input_file)\n\tannotated_video = video.fl_image(annotate_image_array2)\n\tannotated_video.write_videofile(output_file, audio=False)", "def emv(inputVideoPath, outputVideoPath, maxLevel, freqLow, freqHigh, alpha, chromAttenuation, startFrameNumber, endFrameNumber, lambdaC=-1, app=\"color\", method=\"ideal\", roi=None): \n fps, frames = getVideoFrames(inputVideoPath, startFrameNumber, endFrameNumber)\n if app==\"color\":\n recreateFrames=emvCoreColor(frames, fps, maxLevel, freqLow, freqHigh, alpha, chromAttenuation, method)\n elif app==\"motion\":\n recreateFrames=emvCoreMotion(frames, fps, maxLevel, freqLow, freqHigh, alpha, lambdaC, chromAttenuation, method)\n saveFramesToVideoROI(frames, recreateFrames, outputVideoPath, roi)\n return", "def processVideo():\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the onsets in the array representing the synchronization light. This function assumes the onsets are periodic (with randomness between 0.5T and 1.5T). The function can also fix missing onsets.
def find_sync_light_onsets(sync_light, invert=True, fixmissing=False):
    # -- Find changes in synch light --
    sync_light_diff = np.diff(sync_light, prepend=0)
    if invert:
        sync_light_diff = -sync_light_diff
    sync_light_diff[sync_light_diff < 0] = 0
    sync_light_threshold = 0.2*sync_light_diff.max()
    sync_light_onset = sync_light_diff > sync_light_threshold
    # -- Find period of sync_light_onset --
    sync_light_onset_ind = np.where(sync_light_onset)[0]
    sync_light_onset_diff = np.diff(sync_light_onset_ind)  # In units of frames
    expected_onset_period = np.median(sync_light_onset_diff)  # In units of (float) frames
    # -- Remove repeated onsets --
    onset_freq_upper_threshold = int(1.5 * expected_onset_period)
    onset_freq_lower_threshold = int(0.5 * expected_onset_period)
    repeated_onsets = sync_light_onset_diff < onset_freq_lower_threshold
    repeated_onsets_ind = np.where(repeated_onsets)[0]
    fixed_sync_light_onset = sync_light_onset.copy()
    fixed_sync_light_onset[sync_light_onset_ind[repeated_onsets_ind+1]] = False
    # -- Fix missing onsets --
    if fixmissing:
        missing_next_onsets = sync_light_onset_diff > onset_freq_upper_threshold
        missing_next_onsets_ind = np.where(missing_next_onsets)[0]
        for indm, missing_onset_ind in enumerate(missing_next_onsets_ind):
            onset_diff = sync_light_onset_diff[missing_onset_ind]
            n_missing = int(np.round(onset_diff / expected_onset_period))-1
            #print(n_missing)
            last_onset_ind = sync_light_onset_ind[missing_onset_ind]
            next_onset_ind = sync_light_onset_ind[missing_onset_ind+1]
            period_missing = (next_onset_ind - last_onset_ind)//(n_missing+1)
            new_onset_inds = last_onset_ind + np.arange(1, n_missing+1)*period_missing
            #print([last_onset_ind, next_onset_ind])
            #print(new_onset_inds)
            fixed_sync_light_onset[new_onset_inds] = True
    return fixed_sync_light_onset
[ "def detect_onsets(bool_arr):\n int_arr = bool_arr.astype(int)\n onsets = np.diff(int_arr, axis=1)\n onsets[onsets < 0] = 0\n\n return onsets", "def onsets_to_onset_times(onsets, fs, N, hop):\n\n onset_times = (onsets * np.arange(0, len(onsets)) * hop + N / 2) / fs \n return onset_times[onset_times > N / 2 / fs]", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def get_onsets(self):\n onsets = np.array([])\n # Just concatenate onsets from all the instruments\n for instrument in self.instruments:\n onsets = np.append(onsets, instrument.get_onsets())\n # Return them sorted (because why not?)\n return np.sort(onsets)", "def _onset_by_sol(input, framesize=512, hopsize=256, fs=44100, window='hann'):\n\n # HPSSで非調波成分のみ取り出すと前処理フィルタなしでもわりあい上手くいく.\n # が,HPSSなしだと持続音が取りきれずonsetが抽出できない.\n w = 3\n m = 3\n alpha = 0.5\n flag1 = 0\n flag2 = 0\n flag3 = 0\n\n # 前処理フィルタ\n bef_b, bef_a = iir.calc_coef_fs(2.0, 200.0, -6.0, 'bpf', fs)\n hpf_b, 
hpf_a = iir.calc_coef_fs(2.0, 4000.0, 3.0, 'hpf2', fs)\n\n fil_data = iir.apply(input, bef_b, bef_a)\n fil_data = iir.apply(fil_data, hpf_b, hpf_a)\n\n odf = flux(fil_data, framesize, hopsize, window, fs)\n odf = moving_average_exp_cy(odf, 10)\n\n onset_data = []\n g = _calcOnsetThreshold(odf, alpha) # Threshold function\n\n sigma = sp.mean(odf) / 12.0\n\n # Onset Detection Revisited (S. Dixon, 2006)の#marupakuri.\n # オリジナルの判定法に差し替える.\n for i in xrange(len(odf)):\n flag1 = 0\n flag2 = 0\n flag3 = 0\n\n rng_start = max(i-w, 0)\n rng_end = min(i+w+1, len(odf))\n max_idx = sp.argmax(odf[rng_start:rng_end])\n if i == rng_start + max_idx:\n flag1 = 1\n\n rng_start = max(i-m*w, 0)\n rng_end = min(i+w+1, len(odf))\n if odf[i] >= (sum(odf[rng_start:rng_end]) / (m*w + w + 1) + sigma):\n flag2 = 1\n\n if odf[i] >= g[i-1]:\n flag3 = 1\n\n onset_data.append(1.0 * flag1 * flag2 * flag3)\n\n onset_data = sp.array(onset_data)\n\n return onset_data, odf, fil_data", "def get_stuttered_based_on_onsets(data, onsets, every=2,\n repeat=lambda: int(np.random.normal() * 2),\n sr=DEFAULT_SAMPLE_RATE):\n last_sec = np.size(data) / sr\n between_onsets = get_intervals_from_dividers(onsets, last_sec)\n for i in np.flip(np.arange(0, len(between_onsets), every), 0):\n for _ in range(repeat()):\n between_onsets = np.insert(between_onsets, i, between_onsets[i], axis=0)\n\n segments = [data[int(start_sec * sr):int(stop_sec * sr)]\n for start_sec, stop_sec in between_onsets]\n return np.concatenate(segments)", "def onset_backtrack(events: np.ndarray, energy: np.ndarray) -> np.ndarray:\n # Find points where energy is non-increasing\n # all points: energy[i] <= energy[i-1]\n # tail points: energy[i] < energy[i+1]\n minima = np.flatnonzero((energy[1:-1] <= energy[:-2]) & (energy[1:-1] < energy[2:]))\n\n # Pad on a 0, just in case we have onsets with no preceding minimum\n # Shift by one to account for slicing in minima detection\n minima = util.fix_frames(1 + minima, x_min=0)\n\n # Only match going left from the detected events\n results: np.ndarray = minima[util.match_events(events, minima, right=False)]\n return results", "def thresholding_onset(x, thresh):\n\n # init\n onset = np.zeros(len(x))\n\n # set to one if over threshold\n onset[x > thresh] = 1\n\n # get only single onset -> attention edge problems\n onset = onset - np.logical_and(onset, np.roll(onset, 1))\n\n return onset", "def calc_onsets(x, fs, N=1024, hop=512, adapt_frames=5, adapt_alpha=0.1, adapt_beta=1):\n\n # stft\n X = custom_stft(x, N=N, hop=hop, norm=True)\n\n # complex domain\n c = complex_domain_onset(X, N)\n\n # adaptive threshold\n thresh = adaptive_threshold(c, H=adapt_frames, alpha=adapt_alpha, beta=adapt_beta)\n\n # get onsets from measure and threshold\n onsets = thresholding_onset(c, thresh)\n\n return onsets", "def _onset_by_mfcc(input, framesize=1024, hopsize=512, fs=44100):\n th = 0.6 # onset検出のodf閾値 (移動平均あり)\n #th = 1.0 # onset検出のodf閾値 (移動平均なし)\n\n input_zero_padded = sp.r_[sp.zeros(framesize+hopsize*1), input] # 先頭onset検出のためダミーデータ挿入\n mfcc_data = mfcc(input_zero_padded, framesize, hopsize, fs, 13)[:,1:]\n d_mfcc_data = mfcc_data[1:] - mfcc_data[0:mfcc_data.shape[0]-1]\n odf = d_mfcc_data.sum(1)\n # odf = calcEMA(odf, 5)\n \n # peakを捉えて直前のdipをonsetとする\n peak_data = _peakpick(odf, th)\n peak_idx = sp.where(peak_data > 0)[0]\n \n onset_idx = []\n for cur_idx in peak_idx:\n for i in range(cur_idx, -1, -1):\n if (odf[i]-odf[i-1] <= 0 and odf[i+1]-odf[i] > 0) or i == 0:\n onset_idx.append(i)\n break\n\n onset_data = odf[onset_idx]\n \n return 
onset_idx, onset_data, odf", "def getOnsetTick(s):\n ticksPerQuarter = getResolution(s)\n onsets = [int(n.offset * ticksPerQuarter) for n in s.flat.notes]\n return onsets", "def _onset_by_klapuri(input, framesize=4410, hopsize=512, fs=44100):\n\n decim_factor = 128\n fbank_coefs = _makeFilterbank(fs)\n\n n_dim = len(fbank_coefs)\n fil_data = sp.zeros((n_dim, len(input)))\n #envData = sp.zeros( (nDim, int(len(inData)/decim_factor) ) )\n #logEnvData = sp.zeros((nDim, int(len(inData)/decim_factor)))\n #dEnvData = sp.zeros((nDim, int(len(inData)/decim_factor)-1))\n \n # OnsetDetectionFunctionの作成\n env_data = []\n d_env_data = []\n for i in xrange(n_dim):\n fil_data[i] = sp_sig.lfilter(fbank_coefs[i]['b'], fbank_coefs[i]['a'], input)\n cur_env_data = _odf(fil_data[i], fs, 128)\n cur_d_env_data = cur_env_data[1:] - cur_env_data[0:len(cur_env_data)-1]\n cur_d_env_data = (cur_d_env_data + abs(cur_d_env_data)) / 2.0 # 半波整流\n #logEnvData[i] = sp.log10(envData[i])\n \n env_data.append(cur_env_data)\n d_env_data.append(cur_d_env_data)\n \n env_data = sp.array(env_data)\n d_env_data = sp.array(d_env_data)\n \n # Onsetの抽出\n \n \n\n #tframesize = int(framesize / decim_factor)\n #nFrames = int(sp.ceil(len(rsmpData[0]) / tframesize))\n #st = 0\n #ed = tframesize\n #winFunc = sig.get_window('hann', tframesize*2)\n #winFunc = winFunc[0:tframesize+1]\n\n #for i in xrange(nFrames):\n # curData = rsmpData[:, st:ed]\n # for j in xrange(len(fbCoefs)):\n # # 畳込み\n # #conv = sp.convolve(curData[j], winFunc)\n # #envData[j, st*2:ed*2] = conv\n #\n # # hilbert\n # envData[j, st:ed] = abs(sig.hilbert(curData[j]))\n # #envData[j, st:ed] = calcEMA(envData[j, st:ed], 32)\n # st += tframesize\n # ed += tframesize\n # if ed > len(rsmpData[0]):\n # ed = len(rsmpData[0])\n\n return env_data, d_env_data", "def resp_onsets(resp, resp_time, onset_time, onset_chan, resp_chan_dict, offset_time=[], offset_chan=[]):\r\n import numpy as np\r\n \r\n # keeps only onset on desired channels\r\n idx = [i for i,c in enumerate(onset_chan) if c in resp_chan_dict.values()]\r\n # and transforms in ndarray\r\n onset_time = utilsfunc.in_array(onset_time)[idx]\r\n onset_chan = utilsfunc.in_array(onset_chan)[idx]\r\n # keeps only offset on desired channels\r\n idx = [i for i,c in enumerate(offset_chan) if c in resp_chan_dict.values()]\r\n # and transforms in ndarray\r\n offset_time = utilsfunc.in_array(offset_time)[idx]\r\n offset_chan = utilsfunc.in_array(offset_chan)[idx]\r\n\r\n onset_idx = np.arange(len(onset_time))\r\n try:\r\n # searchs the onset on responding channel closest but anterior to the response\r\n onset_resp_idx = np.where((onset_chan == resp_chan_dict[resp]) & ((onset_time - resp_time) < 0))[0]\r\n except KeyError:\r\n onset_resp_idx = []\r\n\r\n # if more than one, keep the one with longest latency (i.e., closest to response)\r\n if len(onset_resp_idx) > 1:\r\n onset_resp_idx = utilsfunc.in_list(onset_resp_idx[np.argmax(onset_time[onset_resp_idx])])\r\n onset_non_resp_idx = np.delete(onset_idx, onset_resp_idx)\r\n\r\n onset_resp = onset_time[onset_resp_idx]\r\n onset_non_resp = onset_time[onset_non_resp_idx]\r\n\r\n # same thing (almost) for offset\r\n offset_idx = np.arange(len(offset_time))\r\n if len(onset_resp) > 0:\r\n try:\r\n # searchs the offset on responding channel just responding onset\r\n offset_resp_idx = np.where((offset_chan == resp_chan_dict[resp]) & ((offset_time - onset_resp) > 0))[0]\r\n except KeyError:\r\n offset_resp_idx = []\r\n else:\r\n offset_resp_idx = []\r\n\r\n # if more than one, keep the one with 
shortest latency (i.e., closest to onset_resp)\r\n if len(offset_resp_idx) > 1:\r\n offset_resp_idx = utilsfunc.in_list(offset_resp_idx[np.argmin(offset_time[offset_resp_idx])])\r\n offset_non_resp_idx = np.delete(offset_idx, offset_resp_idx)\r\n\r\n offset_resp = offset_time[offset_resp_idx]\r\n offset_non_resp = offset_time[offset_non_resp_idx]\r\n\r\n# if len(offset_time) > 0:\r\n# offset_resp = offset_time[onset_resp_idx]\r\n# offset_non_resp = offset_time[np.invert(onset_resp_idx)]\r\n# else:\r\n# offset_resp = utilsfunc.in_array(offset_time)\r\n# offset_non_resp = utilsfunc.in_array(offset_time)\r\n\r\n return onset_resp, onset_non_resp, offset_resp, offset_non_resp", "def extract_duration_of_onsets2(onsets, offsets):\n onsets3 = []\n durations = []\n \n if len(onsets) == 0:\n return np.array([], dtype=np.int), np.array([], dtype=np.int)\n \n # This trigger will be set after each detected duration to mask out\n # subsequent onsets greedily\n onset_trigger = np.min(onsets) - 1\n \n # Iterate over onsets\n for idx, val in enumerate(onsets):\n # Skip onsets \n if val < onset_trigger:\n continue\n \n # Find upcoming offsets and skip if none\n upcoming_offsets = offsets[offsets > val]\n if len(upcoming_offsets) == 0:\n continue\n next_offset = upcoming_offsets[0]\n \n # Store duration and this onset\n onsets3.append(val)\n durations.append(next_offset - val)\n \n # Save this trigger to skip subsequent onsets greedily\n onset_trigger = next_offset\n\n return np.asarray(onsets3), np.asarray(durations)", "def get_selected_muons(muons, trigobj, mask_events, mu_pt_cut_leading, mu_pt_cut_subleading, mu_aeta_cut, mu_iso_cut): \n passes_iso = muons.pfRelIso04_all < mu_iso_cut\n passes_id = muons.mediumId == 1\n passes_subleading_pt = muons.pt > mu_pt_cut_subleading\n passes_leading_pt = muons.pt > mu_pt_cut_leading\n passes_aeta = NUMPY_LIB.abs(muons.eta) < mu_aeta_cut\n \n trigobj.masks[\"mu\"] = (trigobj.id == 13)\n \n muons_matched_to_trigobj = NUMPY_LIB.invert(mask_deltar_first(muons, muons.masks[\"all\"], trigobj, trigobj.masks[\"mu\"], 0.1))\n \n #select muons that pass these cuts\n muons_passing_id = passes_iso & passes_id & passes_subleading_pt & muons_matched_to_trigobj\n \n #select events that have muons passing cuts \n events_passes_muid = sum_in_offsets(muons, muons_passing_id, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n events_passes_leading_pt = sum_in_offsets(muons, muons_passing_id & passes_leading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 1\n events_passes_subleading_pt = sum_in_offsets(muons, muons_passing_id & passes_subleading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n\n base_event_sel = mask_events & events_passes_muid & events_passes_leading_pt & events_passes_subleading_pt\n \n muons_passing_os = select_muons_opposite_sign(muons, muons_passing_id & passes_subleading_pt)\n events_passes_os = sum_in_offsets(muons, muons_passing_os, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) == 2\n \n final_event_sel = base_event_sel & events_passes_os\n final_muon_sel = muons_passing_id & passes_subleading_pt & muons_passing_os\n \n return {\n \"selected_events\": final_event_sel,\n \"selected_muons\": final_muon_sel,\n }", "def compare_to_sets_single_song(song, motifs, sets):\n counters = {mot: 0 for mot in motifs}\n n = len(song[\"dTseqs\"])\n for i in range(n):\n for mot in motifs:\n if i+mot <= n:\n nextMotif = str([(song[\"dTseqs\"][k], song[\"tseqs\"][k], song[\"pitchseqs\"][k])\n for k in range(i, i+mot)])\n if nextMotif in 
sets[mot]:\n counters[mot] += 1\n\n # divide number of occurrences by number of sequences\n return np.array([1. - counters[mot] / float(n - mot + 1) for mot in motifs])", "def create_onsets(data, dur):\n onsets = []\n for index, row in data.iterrows():\n if row[\"duration\"] >= dur:\n onsets.append(row[\"onset\"])\n return onsets", "def all_covers(sets, size):\n unique_sets = []\n [unique_sets.append(s) for s in sets if len(s) <= size and s not in unique_sets]\n unions = unique_sets.copy()\n covers = []\n i = 0\n while i < len(unions):\n covers.append(0)\n for s in sets:\n if s.issubset(unions[i]):\n covers[i] += 1\n for s in unique_sets:\n u = unions[i].union(s)\n if len(u) <= size and u not in unions:\n unions.append(u)\n i += 1\n if unions == []:\n return []\n return sorted(unions, key=lambda s: (-covers[unions.index(s)], len(s), list(s)))", "def find_vocal_periods(vocal_band, vb_stds = 1, vd_stds = 1, vd_win = .25, fs_mic = 25000, onset_shift = 0):\n vocal_band_thresh = vb_stds * np.std(vocal_band)\n # find periods of time above the threshold...\n vocal_threshed = np.zeros(vocal_band.shape)\n for i in range(vocal_band.shape[0]):\n if vocal_band[i] > vocal_band_thresh: \n vocal_threshed[i] = 1\n else: vocal_threshed[i] = 0 \n \n # TODO it's pretty weird the way I'm windowing this, maybe I should just lowpass filter it?\n window = np.int(np.round(vd_win * fs_mic / 2) * 2) # force an even integer\n overlap = window / 2\n start_time = 0\n i = 0\n vocal_density = np.zeros(np.int(np.floor(vocal_band.shape[0]/(window-overlap))))\n while start_time + window < vocal_band.shape[0]:\n vocal_density[i] = np.mean(vocal_threshed[start_time:start_time+window])\n start_time = start_time + window - overlap\n i += 1 \n \n # sound onsets: 1 is an onset, -1 is an offset\n density_thresh = vd_stds * np.std(vocal_density)\n vd_crosses = np.zeros(len(vocal_density))\n for i in range(vocal_density.shape[0]):\n if vocal_density[i] > density_thresh:\n vd_crosses[i] = 1\n sound_onsets = vd_crosses[1:len(vd_crosses)] - vd_crosses[0:len(vd_crosses)-1] \n sound_onsetsb = np.zeros(len(sound_onsets)+1)\n sound_onsetsb[1:len(sound_onsetsb)] = sound_onsets # just aligns for n-1, must be a better way oh well\n del sound_onsets\n sound_onsets = sound_onsetsb\n del sound_onsetsb\n # this just checks that the first sound onset wasn't obscured by the beginning of the file\n for i in range(len(sound_onsets)):\n if sound_onsets[i] == -1:\n sound_onsets[0] = -1 # the case where sound onset happens on the first data point\n print(\"Warning, sound appears to onset before file start, first time point being set to an onset\")\n if sound_onsets[i] == 1:\n break \n # check for missing offset (sound continues past data file)\n for i in range(len(sound_onsets)):\n if sound_onsets[len(sound_onsets)-i-1] == 1:\n sound_onsets[len(sound_onsets)] = -1\n print(\"Warning, sound appears to continue past file end, last time point being set to an offset\")\n if sound_onsets[len(sound_onsets)-i-1] == -1:\n break\n \n # convert to a list of just onsets and offsets \n sound_onset = []\n sound_offset = []\n for i in range(len(sound_onsets)):\n if sound_onsets[i] == 1:\n sound_onset = np.append(sound_onset, [i])\n if sound_onsets[i] == -1:\n sound_offset = np.append(sound_offset, [i]) \n try:\n len(sound_onset) == len(sound_offset)\n except:\n print(\"Number of vocal onsets and offsets not equal!\")\n sound_onset = sound_onset * (window - overlap) # convert to original data points\n sound_onset = sound_onset + np.round(onset_shift * fs_mic)\n 
sound_offset = sound_offset * (window - overlap) # \" \"\n return sound_onset, sound_offset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Estimate whether the animal was running during each trial. This function first smooths the running trace according to smoothsize (noncausal); it then uses the average of N presamples before the onset to estimate whether running was higher than the threshold.
def estimate_running_each_trial(running_trace, trial_onset, smoothsize=10, presamples=4, threshold=3, showfig=False):
    smoothwin = np.ones(smoothsize)/(smoothsize)
    running_trace_smooth = np.convolve(running_trace, smoothwin, mode='same')
    trial_onset_ind = np.where(trial_onset)[0]
    presamples_inds = np.arange(-presamples, 0) + trial_onset_ind[:, np.newaxis]
    pretrial_avg = running_trace_smooth[presamples_inds].mean(axis=1)
    running_each_trial = pretrial_avg > threshold
    if showfig:
        plt.cla()
        plt.plot(running_trace_smooth, '0.8')
        plt.plot(trial_onset_ind, pretrial_avg, 'xg')
        plt.plot(trial_onset_ind, running_each_trial*running_trace_smooth.max(), 'og')
        plt.axhline(threshold, color='k')
        plt.legend(['running_trace_smooth', 'pretrial_avg', 'running_each_trial'], loc='upper right')
        plt.show()
    return running_each_trial, running_trace_smooth
[ "def stationarity(self, nfactor=20):\n \n tau = self.sampler.get_autocorr_time(tol=0)\n converged = np.all(tau * nfactor < self.sampler.iteration)\n return converged", "def _check_ntrial(trial_bool, category_thresh=8):\n return np.sum(trial_bool) >= category_thresh", "def stop_on_low_ais_ess(trial_id, result):\n return result[\"ais_effective_sample_size\"] < 0.1", "def full(self): ###\n return self.collected() >= self.nsamples", "def spikes(arr):\n arr = np.array(arr)\n if (arr.size == 0) or flat(arr) or monotonic(arr):\n return False\n arr = normalize(arr)\n spikes = np.where(arr > arr.mean())[0]\n rest = np.ones_like(arr, dtype=bool)\n rest[spikes] = False\n return flat(arr[rest]) and flat(np.diff(arr[spikes]))", "def pred_time(self):\n self.count_ += 1\n if self.count_ >= self.pred_freq:\n self.count_ = 0\n return True\n return False", "def identify_catch_trials(obs):\n n_trial = obs.n_trial\n is_catch = np.zeros([n_trial], dtype=bool)\n for i_trial in range(n_trial):\n # Determine which references are identical to the query.\n is_identical = np.equal(\n obs.stimulus_set[i_trial, 0], obs.stimulus_set[i_trial, 1:]\n )\n if np.sum(is_identical) > 0:\n is_catch[i_trial] = True\n return is_catch", "def is_on_hot_streak(self):\n streak_count = len(self.winning_streak)\n\n if streak_count >= 3:\n if streak_count == 3:\n self.log.info(\"SpiffyRPG: %s is on a streak of 3!\" % self.name)\n elif streak_count == 4:\n self.log.info(\"SpiffyRPG: %s is on a streak of 4!\" % self.name)\n\n return streak_count", "def get_excess_smoothing_status(self) -> bool:\n return self._excess_smoothing_live.get()", "def each_train_has_n_spikes(container, n):\n if n < 1:\n raise ValueError(\"Please provide a number greater than %d, \"\n \"when setting the condition for a minimal number \"\n \"of Spikes in each SpikeTrain.\" % n)\n sts = nt.get_all_spiketrains(container)\n return [st for st in filter(lambda x: np.size(x) >= n, sts)]", "def binSpikeTimes(num_stim, pres_duration, num_pres_per_stim, is_bright, source_on_spikes, source_off_spikes, bright_spikes, dark_spikes):\n trial_borders = args.pres_duration * np.arange(num_stim*args.num_pres_per_stim + 1)\n binned_source_on_spikes, binned_source_off_spikes = np.zeros(trial_borders.size-1), np.zeros(trial_borders.size-1)\n binned_bright_spikes, binned_dark_spikes = np.zeros(trial_borders.size-1), np.zeros(trial_borders.size-1)\n for on_st, off_st in zip(source_on_spikes,source_off_spikes):\n binned_source_on_spikes += np.histogram(on_st, bins=trial_borders)[0]\n binned_source_off_spikes += np.histogram(off_st, bins=trial_borders)[0]\n for bright_st, dark_st in zip(bright_spikes, dark_spikes):\n binned_bright_spikes += np.histogram(bright_st, bins=trial_borders)[0]\n binned_dark_spikes += np.histogram(dark_st, bins=trial_borders)[0]\n return binned_source_on_spikes, binned_source_off_spikes, binned_bright_spikes, binned_dark_spikes", "def was_pig_caught(prize):\n if prize > 20:\n return True\n return False", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def running_std_gap(flux, window, N=3, nSigTimes=3.3):\n gap = np.zeros(len(flux))\n \n std_array = np.zeros(len(flux))\n \n \n for i in range(window, len(flux), 1):\n \n f = flux[i-window:i]\n std_array[i] = np.nanstd(f)\n \n #plt.figure()\n 
#plt.plot(std_array,'.')\n \n med_std = np.median(std_array)\n #print(\"mean std: %f\" % med_std)\n \n argsort = np.argsort(std_array)\n \n for i in argsort[-1*N:]:\n if std_array[i] > med_std * nSigTimes:\n gap[i-window:i] = 1\n \n isbad = gap == 1\n \n return isbad, med_std", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def is_smelling(self,conc_array):\n if conc_array[int(self.x)][int(self.y)]>self.threshold:\n self.smell_timer = self.Timer(self.T,self.lamda)\n #Nav mode three and four need to know whether the moth is smelling\n #at a specific moment, for that reason they use Tfirst.\n self.Tfirst = self.T\n self.odor = True #this datum will be useful in the graphical functions\n return True\n elif self.turned_on:\n self.odor = False\n if self.smell_timer.is_running(self.T):\n return True #note - even though the there is no detection, the navigator stay in nav mode.\n else:\n self.odor = False\n return False", "def 
_compute_is_terminal(self):\n new_score = self.episode_qualities[-1]\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when a quality reaches a predefined level\n if new_score >= self.target_quality:\n done = True\n return done", "def is_stationary(ts,th=0.8):\n res = list()\n for n,g in ts.groupby(by=ts.index.date):\n res.append(test_stationarity(g.dropna().values, verbose=0, critical='5%'))\n\n return np.sum(res)/np.size(res)>=th", "def iterative_thresholding(radiograph, mask, teeth_pref=0.5):\n threshold = cv2.mean(radiograph, mask)[0]\n while True:\n non_teeth_mean = cv2.mean(radiograph, cv2.threshold(radiograph, threshold, 255, cv2.THRESH_BINARY_INV)[1])[0]\n teeth_mean = cv2.mean(radiograph, cv2.threshold(radiograph, threshold, 255, cv2.THRESH_BINARY)[1])[0]\n prev_threshold = threshold\n threshold = teeth_pref*non_teeth_mean + (1-teeth_pref)*teeth_mean\n if prev_threshold == threshold:\n break\n return cv2.threshold(radiograph, threshold, 1, cv2.THRESH_TOZERO)[1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }