query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (sequence, lengths 19–20) | metadata (dict)
---|---|---|---|
A view to add a new portfolio project | def add_project(request):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES)
        if form.is_valid():
            project = form.save()
            messages.success(request, 'Project added successfully!')
            return redirect(reverse('portfolio'))
        else:
            messages.error(request, 'Failed to add project. \
                Please ensure the form is valid')
    else:
        form = ProjectForm()

    form = ProjectForm()
    template = 'portfolio/add_project.html'
    context = {
        'form': form,
    }

    return render(request, template, context) | [
"def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")",
"def add_project(request):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(request, 'Failed to add project.\\\n Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)",
"def new_project():\n\n form = EditProjectForm()\n\n if form.validate_on_submit() and request.method == 'POST':\n\n try:\n project = Project(\n name=Project.generate_page_name(),\n title=form.title.data,\n body=form.body.data,\n created=datetime.datetime.now(),\n private=form.private.data,\n user=current_user\n )\n db.session.add(project)\n db.session.commit()\n\n return redirect(url_for('view_project', project_name=project.name))\n\n except Exception as e:\n print('[ERROR]: ', e)\n # TODO handle this error better\n return redirect(url_for('index'))\n\n # Render without any page content to auto fill editor with as we're creating a new project\n return render_template('editor.html', form=form, type_='project', new=True, data=None)",
"def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)",
"def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())",
"def portfolio_detail():\n\n form = PortfolioAddForm()\n\n if form.validate_on_submit():\n try:\n portfolio = Portfolio(name=form.data['name'], user_id=g.user.id)\n db.session.add(portfolio)\n db.session.commit()\n except (DBAPIError, IntegrityError):\n flash('There was a problem creating your portfolio.')\n return render_template('portfolio/search.html', form=form)\n # Create portfolio was successful. Redirect to search.html\n return redirect(url_for('.company_search'))\n\n # import pdb; pdb.set_trace()\n user_portfolios = Portfolio.query.filter(\n Portfolio.user_id == g.user.id).all()\n portfolio_ids = [p.id for p in user_portfolios]\n\n return render_template('portfolio/portfolio.html', form=form)",
"def new_project(request):\n\n user = request.user\n groups = user.groups.all()\n select_group_buffer = []\n\n # Make a list of associated groups to the user for the select_group options\n for group in groups:\n group_name = group.name\n remove_epanet_group_name = group_name.split('.')[1]\n cleaned_group_name = remove_epanet_group_name.replace(\"_\", \" \").replace(\"-\", \" \")\n select_group_buffer.append((str(cleaned_group_name.title()), str(group_name)))\n\n name_input = TextInput(\n display_text='Name of Project',\n name='inputName',\n placeholder='e.g.: Bluffdale'\n )\n\n select_group = SelectInput(\n display_text='Group Owner',\n name='select_group',\n multiple=False,\n options=select_group_buffer\n )\n\n add_button = Button(\n display_text='Add EPANET Project',\n icon='glyphicon glyphicon-plus',\n style='success',\n name='submit-add-project',\n attributes='id=submit-add-project'\n )\n\n context = {'name_input': name_input,\n 'add_button': add_button,\n 'select_group': select_group}\n\n return render(request, 'epanet/new_project.html', context)",
"def add():\n form = ProjectForm(request.form)\n form.customer.choices = [(customer.id, customer.name) for customer in Customer.all()]\n if form.validate_on_submit():\n Project.create(\n number=form.number.data,\n description=form.description.data,\n customer_id=form.customer.data\n )\n flash(\"Project {} created.\".format(form.number.data))\n return redirect(url_for(\"project.projects\"))\n else:\n flash_errors(form)\n return render_template(\"projects/add.html\", form=form)",
"def new_project(request):\n\n form = ProjectForm(user=request.user)\n if request.method == \"POST\":\n form = ProjectForm(user=request.user, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"/projects/{}/\".format(form.instance.id))\n return render(request, \"new-project.html\", {\"form\": form})",
"def get_add_project_form():\n\n return render_template(\"project_add.html\")",
"def get_project_add_form():\n\n return render_template(\"project_add.html\")",
"def projects():\n return render_template('index.html', id='portfolio')",
"def show_project_creation_form():\n\n return render_template(\"create_project.html\")",
"def portfolio(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/portfolio.html',\n {\n 'title':'Портфолио',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )",
"def create_project(request):\n\n\tnames = settings.PROJECT_NAMES\n\tprojects_home = settings.PROJECTS_HOME\n\n\tcontext = {\n\t\t\"template\": request.GET.get(\"template\", \"blog\"),\n\t\t\"title\": random.choice(names) + \"_\" + random.choice(names),\n\t\t\"projects_home\": projects_home,\n\t\t'error': ''\n\t}\n\n\tif request.method == \"POST\":\n\t\ttemplate = request.POST.get(\"template\")\n\t\ttitle = request.POST.get(\"title\")\n\n\t\ttry:\n\t\t\tcopy_project_template(template, title)\n\t\texcept CommandError, e:\n\t\t\tcontext['title'] = title\n\t\t\tcontext['error'] = str(e)\n\t\t\treturn render(request, 'create_project.html', context)\n\n\t\treturn redirect('open_project', project_id=title)\n\n\treturn render(request, 'create_project.html', context)",
"def get(self, request, *args, **kwargs):\n # Set initial project_lead to current user\n form = ProjectForm(initial={'project_lead': request.user},\n user=request.user)\n return render(request, 'project_create.html', {'form': form})",
"def post_project(request):\n if request.method == \"POST\":\n form = AddProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save(commit=False)\n project.profile = request.user\n project.save()\n\n return redirect(\"index\")\n else:\n form = AddProjectForm()\n\n return render(request, \"post_project.html\", {\"form\": form})",
"def create_project(request, type):\n if type == 'LINK':\n return LinkingProjectCreateView.as_view()(request)\n else:\n return DedupProjectCreateView.as_view()(request)",
"def portfolio_detail():\n return render_template('portfolio/portfolio.html')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
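
Each row above pairs a natural-language query with one positive code snippet and a list of hard negatives, and its metadata declares a single triplet objective over the (query, document, negatives) columns. As a rough illustration only — the file name, the JSON export, and the sentence-transformers dependency below are assumptions, not something stated by this dump — one row could be expanded into per-negative training triplets like this:

```python
# Sketch (not part of the dataset itself): expand one (query, document, negatives)
# row into per-negative triplets. The JSON file name and the use of
# sentence-transformers are assumptions made for illustration.
from datasets import load_dataset
from sentence_transformers import InputExample

# Hypothetical local export of this data as JSON lines.
rows = load_dataset("json", data_files="code_search_triplets.jsonl", split="train")

def row_to_triplets(row):
    # One example per negative: anchor = query, positive = document, negative = distractor code.
    return [
        InputExample(texts=[row["query"], row["document"], negative])
        for negative in row["negatives"]
    ]

train_examples = [ex for row in rows for ex in row_to_triplets(row)]
```
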
A view to edit a portfolio project | def edit_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    project = get_object_or_404(Project, pk=project_id)
    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES, instance=project)
        if form.is_valid():
            form.save()
            messages.success(request, 'Successfully updated project')
            return redirect(reverse('portfolio'))
        else:
            messages.error(request, 'Failed to update project. \
                Please ensure the form is valid.')
    else:
        form = ProjectForm(instance=project)
        messages.info(request, f'You are editing {project.name}')

    template = 'portfolio/edit_project.html'
    context = {
        'form': form,
        'project': project,
    }

    return render(request, template, context) | [
"def project_edit(request, project_slug):\n project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n\n if project.is_imported:\n form_class = ImportProjectForm\n else:\n form_class = CreateProjectForm\n\n form = form_class(instance=project, data=request.POST or None)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n project_dashboard = reverse('projects_manage', args=[project.slug])\n return HttpResponseRedirect(project_dashboard)\n\n return render_to_response(\n 'projects/project_edit.html',\n {'form': form, 'project': project},\n context_instance=RequestContext(request)\n )",
"def project_edit(request, project_slug):\r\n project = get_object_or_404(request.user.projects.live(),\r\n slug=project_slug)\r\n\r\n form_class = ImportProjectForm\r\n\r\n form = form_class(instance=project, data=request.POST or None)\r\n\r\n if request.method == 'POST' and form.is_valid():\r\n form.save()\r\n project_dashboard = reverse('projects_detail', args=[project.slug])\r\n return HttpResponseRedirect(project_dashboard)\r\n\r\n return render_to_response(\r\n 'projects/project_edit.html',\r\n {'form': form, 'project': project},\r\n context_instance=RequestContext(request)\r\n )",
"def edit_project(request, project):\n\n project = get_object_or_404(Project, id=project, user=request.user)\n form = ProjectForm(user=request.user, instance=project)\n if request.method == \"POST\":\n form = ProjectForm(user=request.user, data=request.POST, instance=project)\n if form.is_valid():\n form.save()\n return redirect(\"/projects/{}/\".format(form.instance.id))\n return render(request, \"edit-project.html\", {\"form\": form})",
"def edit_project_view(request, project_id):\n\n # Use to tell to the template that the user want to edit a project\n is_new = False\n\n # Retrieve the project to be edited or raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n # Check if the logged in user is allowed to edit this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n\n # Check if the view receive data from the form\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Manually update the field using the data from form\n project.name = form.cleaned_data[\"name\"]\n project.members.set(form.cleaned_data[\"members\"])\n # Save the project. Does not creat a new project as long as the project's id is not modified\n project.save()\n return redirect(\"projects\")\n else:\n form = ProjectForm(user=request.user, instance=project)\n return render(request, 'newProject.html', locals())\n else:\n return redirect(\"projects\")\n return redirect(\"projects\")",
"def edit(**kwargs):\n return _alter_project(**kwargs)",
"def update_project(id):\n if request.method == \"POST\":\n result = update_project_to_db(\n id,\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n project = get_project(id)\n return render_template(\"edit_project.html\", **project)",
"def edit_project(request, game_project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n game_project = get_object_or_404(GameProject, pk=game_project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if game_project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n game_project_form = ProjectForm(\n request.POST,\n request.FILES,\n instance=game_project\n )\n if game_project_form.is_valid():\n game_project_form.save(commit=False)\n game_project.owner = profile\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n game_project.save()\n messages.success(request, 'Successfully updated project!')\n return redirect(reverse('project_detail', args=[game_project.id]))\n else:\n messages.error(\n request,\n 'Failed to update project. Please ensure the form is valid.'\n )\n else:\n game_project_form = ProjectForm(instance=game_project)\n messages.info(request, f'You are editing {game_project.title}')\n\n template = 'gameproject/edit_project.html'\n context = {\n 'game_project_form': game_project_form,\n 'game_project': game_project,\n }\n\n return render(request, template, context)",
"def portfolio_detail():\n\n form = PortfolioAddForm()\n\n if form.validate_on_submit():\n try:\n portfolio = Portfolio(name=form.data['name'], user_id=g.user.id)\n db.session.add(portfolio)\n db.session.commit()\n except (DBAPIError, IntegrityError):\n flash('There was a problem creating your portfolio.')\n return render_template('portfolio/search.html', form=form)\n # Create portfolio was successful. Redirect to search.html\n return redirect(url_for('.company_search'))\n\n # import pdb; pdb.set_trace()\n user_portfolios = Portfolio.query.filter(\n Portfolio.user_id == g.user.id).all()\n portfolio_ids = [p.id for p in user_portfolios]\n\n return render_template('portfolio/portfolio.html', form=form)",
"def project_edit(project_id):\n\n if not 'CURR_USER_KEY' in session:\n flash(\"Unauthorized access. Account needed to edit.\", \"danger\")\n return redirect(f\"/project/{project_id}\")\n\n project = Project.lookup_project(project_id)\n project = Project.lookup_project(project_id)\n if g.user.id != project.user.id:\n flash(\"Unauthorized user. Correct user account needed.\", \"danger\")\n return redirect(f\"/project/{project_id}\")\n\n form = ProjectForm(obj = project, prefix='form-project-edit-')\n\n tags_full_list = Tag.list_all()\n project_tags = [tag.name for tag in project.tags]\n tags_list_str = \"|\".join(project_tags)\n\n if form.validate_on_submit():\n project.name = form.name.data\n project.description = form.description.data\n project.user_id = g.user.id\n project.contact_info_type = form.contact_info_type.data\n project.contact_info = form.contact_info.data\n project.lat = form.lat.data\n project.long = form.long.data\n project.pic_url1 = request.form.get('pic_url1')\n project.pic_url2 = request.form.get('pic_url2')\n\n tags = request.form.get('tags')\n if (tags):\n tags = tags.split('|')\n tag_objs = []\n if (tags and len(tags) > 0):\n for name in tags:\n tag_obj = Tag.lookup_tag(name)\n if not tag_obj:\n tag_obj = Tag.create_tag(name)\n tag_objs.append(tag_obj)\n project.tags = tag_objs\n\n optional_date_keys = ['inquiry_deadline', 'work_start', 'work_end']\n optional_date_values = []\n for each in optional_date_keys:\n try:\n y, m, d = request.form.get(each).split('-')\n print(y,m,d,'out of if')\n if y and m and d:\n date = datetime(int(y), int(m), int(d))\n optional_date_values.append(date)\n print(date, 'in if', optional_date_values)\n except ValueError:\n pass\n optional_date_values.append(None)\n print('caught value error', optional_date_values)\n except AttributeError:\n optional_date_values.append(None)\n print('caught value error', optional_date_values)\n project.inquiry_deadline, project.work_start, project.work_end = optional_date_values \n db.session.commit()\n\n flash(\"Project edited successfully.\", \"success\")\n return redirect(f\"/project/{project_id}\")\n\n return render_template(\"project-edit.html\", form=form, project=project, tags_list_str=tags_list_str, tags_full_list=tags_full_list)",
"def project_detail(request, pk):\n project = Project.objects.get(pk=pk)\n context = {\n 'project': project\n }\n return render(request, 'projects/project_detail.html', context)",
"def portfolio_detail():\n return render_template('portfolio/portfolio.html')",
"def projects():\n return render_template('index.html', id='portfolio')",
"def project_admin(request, project_id):\n project = Project.objects.get(id=project_id)\n\n # Update details displayed on the page\n if request.POST:\n pname = request.POST.get(\"project_name\")\n est = request.POST.get(\"original_estimate\")\n est_strip = est.replace('£', '').replace(',', '').strip()\n Project.objects.filter(pk=project_id).update(\n project_name=pname, original_estimate=est_strip)\n\n return redirect(reverse(project_admin, args=[project_id]))\n\n # Displays Project Details and Lists of Users with access to the project\n else:\n projectForm = ProjectForm()\n userForm = ProjectUserForm()\n users = ProjectUser.objects.filter(project=project)\n stripeUser = get_object_or_404(ProjectStripeDetails, project=project)\n\n template = 'project/admin.html'\n context = {\n 'project': project,\n 'projectForm': projectForm,\n 'userForm': userForm,\n 'users': users,\n 'stripeUser': stripeUser.customer_id,\n }\n\n return render(request, template, context)",
"def portfolio(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/portfolio.html',\n {\n 'title':'Портфолио',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )",
"def view_project(project_id):\n return render_template(\n \"project/view_project.html\",\n project=Project.query.filter(Project.id == project_id).first_or_404()\n )",
"def browseProject(request):\n\n return render(request, 'profile/browse_project.html')",
"def edit():\n config.edit_project()\n # projects = config.read_config_file()\n # return projects",
"def project(request, project_id):\n project = Project.objects.get(id=project_id)\n # Make sure the project belongs to the current user.\n if project.owner != request.user:\n raise Http404\n \n task_section = project.tasksection_set.order_by('-date_added')\n context = {'project':project, 'task_section': task_section}\n return render(request, 'tasks/project.html', context )",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A view to delete a project from the portfolio | def delete_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    project = get_object_or_404(Project, pk=project_id)
    project.delete()
    messages.success(request, 'Project deleted!')
    return redirect(reverse('portfolio')) | [
"def delete_project(request, project):\n\n project = get_object_or_404(Project, id=project, user=request.user)\n if request.method == \"POST\":\n project.delete()\n return redirect(\"/projects/\")\n return render(request, \"delete-project.html\", {\"project\": project})",
"def delete_project_view(request, id):\n\n # retrieve the project to be deleted through his id. Raise an error if the project does not exist\n project = get_object_or_404(Projet, id=id)\n\n # Check if the logged in user is allowed to delete this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Eventually delete the project\n project.delete()\n\n return redirect(\"projects\")",
"def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))",
"def delete_project(request, pk):\n p = Project.objects.get(pk=pk)\n aps = p.apostila_set.all()\n for a in aps:\n # desociates project and Apostila\n a.project.remove(p)\n p.delete()\n return redirect('url_projects')",
"def delete_project(self):\n selected_project = pm.PyNode(self.list_projects.currentItem().text())\n ldtmaya.delete_surfacing_project(selected_project)\n self.update_ui_projects()",
"def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response",
"def delete(self):\n args = {\"id\": self.id}\n _perform_command(self.owner, \"project_delete\", args)\n del self.owner.projects[self.id]",
"def deleteproject():\n cust_id = request.values.get(\"cust_id\")\n removeCustomer(cust_id)\n\n return created_request(\"Good\")",
"def deleteProjectButton():\r\n \r\n chosenProject = existingProjectsTree.focus()\r\n chosenProject = existingProjectsTree.item(chosenProject)\r\n try:\r\n chosenProject = chosenProject['values'].pop(0)\r\n except:\r\n messagebox.showinfo(\"Hint\", \"Please highlight the project you wish to delete\")\r\n return\r\n Project.delete(chosenProject)\r\n updateProjectList()\r\n closeTabs()\r\n selectedProject.set(\"Create a new project or choose from existing\")\r\n window.title(\"Landscape Budget\")\r\n setMaterialChoices()\r\n updateAll()",
"def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None",
"def remove_project(name):\n name = urllib.parse.unquote(name)\n projects = utils.load_project_overview_config()\n\n del projects[name]\n\n utils.write_project_overview_config(projects)\n\n return redirect(url_for('projects_overview'))",
"def deleteProject(self,id):\n \n #self.projects.delete(id)\n #return newProject",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def test_delete_project(self):\n pass",
"def test_projects_delete(self):\n pass",
"def test_deleting_adviser_project(self):\n url = reverse('project', args=[1])\n response = self.client.delete(url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(AdviserProject.objects.count(), 1)",
"def delete_project(arn=None):\n pass",
"def test_iam_project_delete(self):\n pass",
"def on_removeProject(self):\n self.log.detail(\">>> Launch 'remove Project' ...\")\n selItems = self.tw_myProjects.selectedItems() or []\n if selItems:\n #--- Check Project ---#\n if selItems[0].project not in self.pinedProjects:\n pQt.errorDialog(\"!!! Project %r not found, Skipp !!!\" % selItems[0].project, self)\n else:\n #--- Remove Poject ---#\n self._users._user.delPinedProject(selItems[0].project)\n self._users._user.writeFile()\n #--- Refresh ---#\n self.buildTree('myProjects')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find a single database | def find_database(self, name_or_id, instance, ignore_missing=True):
        instance = self._get_resource(_instance.Instance, instance)
        return self._find(
            _database.Database,
            name_or_id,
            instance_id=instance.id,
            ignore_missing=ignore_missing,
        ) | [
"def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)",
"def database():\n return _databases[_active_db]",
"def select_db(cli, dbname):\n db = cli[dbname]\n return db",
"def searchDatabase(self, name: str) -> Database:\n for db in self._typeCheckerList:\n if db.name.lower() == name.lower():\n return db\n return None",
"def get_database(self, database, instance=None):\n return self._get(_database.Database, database)",
"def find_server(message, db):\n db_list = sql.database_list()\n if db in db_list:\n server = db_list[db]\n message.reply(Strings['DATABASE_SERVER'].format(db, server))\n else:\n message.reply(Strings['DATABASE_UNKNOWN'].format(db))",
"def get_database(self, model):\n for router in self.routers:\n r = router.get_database(model)\n if r is not None:\n return r\n return self.get('default')",
"def lookup(cls, connection, name):\n logger = logging.getLogger(\"marklogic\")\n\n uri = connection.uri(\"databases\", name)\n\n logger.debug(\"Reading database configuration: {0}\".format(name))\n response = connection.get(uri)\n\n result = None\n if response.status_code == 200:\n result = Database.unmarshal(json.loads(response.text))\n if 'etag' in response.headers:\n result.etag = response.headers['etag']\n\n return result",
"def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)",
"def get_db():\n db = load()\n return db",
"def _find_database_key(results):\n\n for sources in results.iterkeys():\n if 'database' in sources:\n return sources",
"def database_info(self, database):\n return self.get(endpoint(self.url, '/'+database)).json()",
"def get_database_from_organisation(organisation_id):\n\n connection_string = \"mysql://{}:{}@{}/{}\".format(RDS_USERNAME,RDS_PASSWORD,RDS_HOST,RDS_DB_NAME)\n engine = create_engine(connection_string,pool_recycle=3600,convert_unicode=True,encoding='utf-8')\n connection = engine.connect()\n meta = MetaData()\n\n worksheet = Table(\"organizations\",meta ,autoload=True, autoload_with=connection)\n statement = select([worksheet.c['database_name']]).where(worksheet.c['organization_id']==organisation_id)\n\n result = connection.execute(statement)\n database_name = result.fetchone()\n connection.close()\n return database_name[0]",
"def find_dbserver(self):\n if self.dbserver_id is not None:\n return ItopapiPrototype.get_itop_class('DBServer').find(self.dbserver_id)\n return None",
"def get_db():\n client = pymongo.MongoClient('localhost', 27017)\n return client.haploqa",
"def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict",
"def checkExistingDataBase(self):\n databases = pymongo.MongoClient()\n for i in databases.list_databases():\n if i['name'] == self.db_name:\n return True",
"def get_db(self, typename):\n return self._dbs[typename]",
"def get_database_from_key(self, key):\n cluster = connections.get_cluster(self.model._shards.cluster)\n\n return cluster.db_for_key(key)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a single database | def get_database(self, database, instance=None):
        return self._get(_database.Database, database) | [
"def database():\n return _databases[_active_db]",
"def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)",
"def select_db(cli, dbname):\n db = cli[dbname]\n return db",
"def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)",
"def get_db():\n db = load()\n return db",
"def get_db():\n if 'DATABASE_URI' in os.environ:\n uri = os.environ['DATABASE_URI']\n return Database(uri=uri)\n raise EnvironmentError('DATABASE_URI environment variable is missing')",
"def get_database(database: Optional[str] = None,\n instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:\n __args__ = dict()\n __args__['database'] = database\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:sqladmin/v1beta4:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value\n\n return AwaitableGetDatabaseResult(\n charset=pulumi.get(__ret__, 'charset'),\n collation=pulumi.get(__ret__, 'collation'),\n etag=pulumi.get(__ret__, 'etag'),\n instance=pulumi.get(__ret__, 'instance'),\n kind=pulumi.get(__ret__, 'kind'),\n name=pulumi.get(__ret__, 'name'),\n project=pulumi.get(__ret__, 'project'),\n self_link=pulumi.get(__ret__, 'self_link'),\n sqlserver_database_details=pulumi.get(__ret__, 'sqlserver_database_details'))",
"def get_db(db_name):\n db_conn_mgr = namedtuple('db_conn_mgr', ['conn', 'cursor'])\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n return db_conn_mgr(conn, cursor)",
"def db(self) -> DB:\n return DB.get_db()",
"def get_db():\n username, password = os.getenv('Mongo_User'), os.getenv('Mongo_Password')\n db_name = os.getenv('Mongo_DBName')\n client = MongoClient('mongodb+srv://' + username + ':' + password\n + os.getenv('Mongo_Postfix'))\n return client.get_database(db_name)",
"def get_database(self, model):\n for router in self.routers:\n r = router.get_database(model)\n if r is not None:\n return r\n return self.get('default')",
"def get_db():\n # this is a bit of a hack, since it assumes all the models talk to the same\n # db. that said a lot of our code relies on that assumption.\n # this import is here because of annoying dependencies\n return Database(settings.COUCH_DATABASE)",
"def get_db():\n client = pymongo.MongoClient('localhost', 27017)\n return client.haploqa",
"def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db",
"def mysql_database():\n return DATABASE",
"def get_db(self):\n return self._db",
"def get_db(request, name=None):\n\n dbname = name\n registry = request.registry\n\n if name is None:\n dbname = registry.settings.get(DBNAME)\n\n if dbname is None:\n raise ConfigurationError('There is no defined database name')\n\n mongodbs = getattr(request, '_mongo_dbs', dict())\n\n db = mongodbs.get(dbname)\n\n if db is None:\n conn = getattr(registry, '_mongo_conn', None)\n\n if conn is None:\n raise ConfigurationError(\n 'There is no database connection available')\n\n db = conn[dbname]\n\n mongodbs[dbname] = db\n request._mongo_dbs = mongodbs\n\n username = registry.settings.get(USERNAME + '.' + dbname)\n password = registry.settings.get(PASSWORD + '.' + dbname)\n\n if not username is None and not password is None:\n db.authenticate(username, password)\n\n def end_request(request):\n db.logout()\n db.connection.end_request() \n\n request.add_finished_callback(end_request)\n\n return db",
"def find_database(self, name_or_id, instance, ignore_missing=True):\n instance = self._get_resource(_instance.Instance, instance)\n return self._find(\n _database.Database,\n name_or_id,\n instance_id=instance.id,\n ignore_missing=ignore_missing,\n )",
"def get_db():\r\n\r\n if not hasattr(g, 'tedtalks_db'):\r\n g.tedtalks_db = TedTalkDatabase(app.config['DATABASE'])\r\n\r\n return g.tedtalks_db"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a single flavor | def get_flavor(self, flavor):
        return self._get(_flavor.Flavor, flavor) | [
"def get(self, flavor):\n return self._get(\"/flavors/%s\" % base.getid(flavor), \"flavor\")",
"def get_flavor(name):\r\n return nova.flavors.find(name=name)",
"def flavor(self, name=None):\n return self.find(self.flavors(), name=name)",
"def get_flavor(self, flavor_id):\n return self._flavor_manager.get(flavor_id)",
"def get_flavor(self, flavor):\r\n if(self.flavors.has_key(flavor)):\r\n return self.flavors[flavor]\r\n return 0",
"def get_flavor(cls, cloud):\n return cls.get(\"flavor\", cloud)",
"def get_flavor(self, flavor_id):\n url = '%s/flavors/%s' % (self.catalog['compute'], flavor_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavor']\n else:\n LOG.error('Get flavor failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def get_flavor(flavor_id):\n if flavor_id in get_presets['servers']['invalid_flavor_ref']:\n return not_found_response('flavors'), 404\n return ({'flavor': {'name': '512MB Standard Instance',\n 'id': flavor_id,\n 'name': 'mimic-test-flavor'}},\n 200)",
"def flavor(self):\n return self._flavor",
"def get_flavor_by_uuid(cls, flavor_uuid):\n return cls.dbdriver.get_flavor_by_uuid(flavor_uuid)",
"def _get_flavor(self, flavor_name):\n url = \"/flavors\"\n resp, body = self.nova.get(url)\n if resp.status_code not in [200, 203]:\n msg = \"Error {0} searching for flavor with name {1}\".format(\n resp.status_code, flavor_name\n )\n raise NotFound(msg)\n for flavor in body['flavors']:\n if flavor['name'] == flavor_name:\n return flavor['id']\n msg = \"Could not find flavor with name {0}\".format(flavor_name)\n raise NotFound(msg)",
"def get_flavor(self, request, tenant_id, flavor_id):\n response_data = get_flavor(flavor_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])",
"def test_get_flavor(self):\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertEqual(self.flavor_ref, flavor.id)",
"def test_aws_service_api_flavor_get(self):\n pass",
"def flavor_access_get(self, context, flavor_uuid):",
"def get_build_flavor():\n return adb.get_property('ro.build.flavor')",
"def get_vm_by_flavor(self, flavor):\n for vm_obj in list(self.vms.values()):\n if vm_obj.flavor == flavor:\n return vm_obj",
"def get_sec_flavor(self):\n\n rules = self._get_rules()\n return rules.child_get('sec-flavor').child_get(\n 'sec-flavor-info').child_get_string('flavor')",
"def GetFlavor(params):\n flavors = {\n 'win32': 'win',\n 'darwin': 'mac',\n 'sunos5': 'solaris',\n 'freebsd7': 'freebsd',\n 'freebsd8': 'freebsd',\n }\n flavor = flavors.get(sys.platform, 'linux')\n return params.get('flavor', flavor)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a generator of flavors | def flavors(self, **query):
        return self._list(_flavor.Flavor, **query) | [
"def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))",
"def show_flavors():\n return get_flavors()",
"def describe_flavors(self):\r\n print(\"\\nFlavors of ice cream: \")\r\n for flavor in self.flavors:\r\n print(\"- \" + flavor.title())",
"def display_flavors(self):\n print(\"\\n------Ice Cream Flavors------\")\n for flavor in self.flavors:\n print(flavor.title())",
"def get_all(self):\n\n flavors_controller = self.driver.manager.flavors_controller\n result = flavors_controller.list()\n\n flavor_list = [\n flavor_response.Model(item, self) for item in result]\n\n return {\n 'flavors': flavor_list\n }",
"def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))",
"def describe_flavors(self):\r\n print(\"The flavors at Sticky Sweet are \" + self.flavors_1 + \", \" + self.flavors_2 + \", and \" +\r\n self.flavors_3 + \".\")",
"def flavor(self, name=None):\n return self.find(self.flavors(), name=name)",
"def get_changed_flavors(changed_files, flavors):\n changed_flavors = []\n for f in changed_files:\n pattern = r\"^(mlflow|tests)/(.+?)(_autolog(ging)?)?(\\.py|/)\"\n # ~~~~~\n # # This group captures a flavor name\n match = re.search(pattern, f)\n\n if (match is not None) and (match.group(2) in flavors):\n changed_flavors.append(match.group(2))\n\n return changed_flavors",
"def flavors(request): # pylint: disable=unused-argument\n # We call our method\n response = BACKEND.flavors()\n return JsonResponse(response)",
"def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")",
"def FlavorHashes(versions, flavor):\n if isinstance(flavor, tuple):\n return [HashSelect(versions, i) for i in flavor[1:]]\n else:\n return [HashSelect(versions, flavor)]",
"def display_flavors(self):\n print('This ice cream stand serves the following flavors: ')\n for index, flavor in enumerate(self.flavors, start=1):\n print('\\t' + str(index) + ') ' + flavor.title() + '.')",
"def _generate_benchmark_variants(benchmark_spec):\n variants = []\n # Cold start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (cold start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args',\n []) + _COLD_START_SHELL_ARGS})\n # Warm start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (warm start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args', [])})\n return variants",
"def get(self, flavor):\n return self._get(\"/flavors/%s\" % base.getid(flavor), \"flavor\")",
"def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor",
"def read_flavors(self): # Remember to undent after super!\n print(self.restaurant_name.title() + \" serves:\")\n for flavor in self.show_flavors:\n print(flavor.title())",
"def get_flavor(name):\r\n return nova.flavors.find(name=name)",
"def _create_flavors(self):\n SYSTEM_COL = self.driver.PODM.get_system_collection()\n for s in SYSTEM_COL.members_identities:\n sys = SYSTEM_COL.get_member(s)\n mem = self.conv_GiB_to_MiB(sys.memory_summary.size_gib) - 512\n proc = sys.processors.summary.count\n flav_id = str(mem) + 'MB-' + str(proc) + 'vcpus'\n res = fields.ResourceClass.normalize_name(flav_id)\n spec = 'resources:' + res\n values = {\n 'name': 'RSD-' + flav_id,\n 'flavorid': flav_id,\n 'memory_mb': mem,\n 'vcpus': proc,\n 'root_gb': 0,\n 'extra_specs': {\n spec: '1'}\n }\n if sys.identity not in self.rsd_flavors:\n try:\n LOG.debug(\"New flavor for system: %s\", sys.identity)\n rsd_flav = flavor._flavor_create(\n context.get_admin_context(), values)\n self.rsd_flavors[flav_id] = {\n 'id': rsd_flav['id'],\n 'rsd_systems': [sys.identity]\n }\n except Exception as ex:\n LOG.debug(\n \"A flavor already exists for this rsd system: %s\", ex)\n ex_flav = flavor.Flavor._flavor_get_by_flavor_id_from_db(\n context.get_admin_context(), flav_id)\n if flav_id not in self.rsd_flavors.keys():\n self.rsd_flavors[flav_id] = {\n 'id': ex_flav['id'],\n 'rsd_systems': [sys.identity]\n }\n else:\n sys_list = self.rsd_flavors[flav_id]['rsd_systems']\n sys_list.append(sys.identity)\n self.rsd_flavors[flav_id]['rsd_systems'] = sys_list"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find a single instance | def find_instance(self, name_or_id, ignore_missing=True):
        return self._find(
            _instance.Instance, name_or_id, ignore_missing=ignore_missing
        ) | [
"def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None",
"def get_instance_by_name(self, name):\n try:\n data = self.get_instances()\n if \"errors\" in data:\n return data\n\n for instance in data[\"instances\"]:\n if instance[\"name\"] == name:\n return instance\n\n return resource_not_found()\n\n except Exception as error:\n print(\"Error fetching instance with name {}. {}\".format(\n name, error))\n raise",
"def get_object(self):\n queryset = self.get_queryset() # 获取查询集\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key] # 获取查询参数值\n try:\n instance = queryset.get(id=id) # 获取当前实例\n return instance # 实例存在则返回实例\n except models.ObjectDoesNotExist: # 捕捉实例不存在异常\n raise Http404('No object found.') # 抛出404异常响应\n raise Http404('No object found.') # 若遍历所以参数都未捕捉到值,则抛出404异常响应",
"def find(self, **kwargs):\n rl = self.findall(**kwargs)\n num = len(rl)\n\n if num == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(msg)\n elif num > 1:\n raise exceptions.NoUniqueMatch\n else:\n return self.get(rl[0].id)",
"def find_one(cls, *args, **kwargs):\n doc = cls._get_collection().find_one(*args, **kwargs)\n if not doc:\n return doc\n return cls.from_dict(doc)",
"def get_instance(self, name_or_id):\n try:\n return self.get_instance_by_name(name_or_id)\n except KeyError:\n pass\n\n try:\n return self.get_instance_by_dns(name_or_id)\n except KeyError:\n pass\n\n return self.get_instance_by_id(name_or_id)",
"def find_exact(self, **kwargs):\n results = list(self.find(**kwargs))\n if len(results) == 1:\n return results[0]\n return None",
"def get_instance_by_name(self, name):\n instances = self.list_instances()\n instance = [i for i in instances if i.name == name]\n if not instance:\n raise KeyError(\"No instances returned with name %s\" % name)\n return instance[0]",
"def find_one(cls, index=None, value=None):\n g = cls.find(index, value)\n one = next(g, None)\n if one is None:\n raise cls.DoesNotExist\n return one",
"def find(self, **filter_args):\n obj_list = self.findall(**filter_args)\n num_objs = len(obj_list)\n if num_objs == 0:\n raise NotFound(filter_args, self)\n if num_objs > 1:\n raise NoUniqueMatch(filter_args, self, obj_list)\n return obj_list[0]",
"def find(cls, uuid):\n entries = cls.objects.filter(uuid=uuid)\n if not entries:\n return None\n else:\n return entries.first()",
"def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None",
"def find_first(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()",
"def find_one(self, *args, **kwargs):\n result = None\n for value in self.find(*args, **kwargs):\n result = value\n break\n return result",
"def find_instance_id(search_name):\n print(yellow(\"Searching for {}.\".format(search_name)))\n owner_id = client('sts').get_caller_identity().get('Account')\n filters = [{'Name': 'owner-id', 'Values': [owner_id]}]\n instances = resource('ec2').instances.filter(Filters=filters).all()\n candidates = []\n for instance in instances:\n if instance.tags:\n for tag in instance.tags:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if name == search_name:\n print(green(\"Found instance {} with id {}.\".format(\n search_name, instance.id)))\n candidates.append(instance)\n\n for instance in candidates:\n if instance.state['Name'] == 'running':\n print(green(\"{} is currently running, so selecting it.\".format(\n instance.id)))\n return instance.id\n else:\n print(yellow(\"{} is currently {}, so not selecting it.\".format(\n instance.id, instance.state['Name'])))\n\n abort(\"Could not find a running instance named {}\".format(search_name))",
"def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]",
"def get_instance(self, instance):\n return self._get(_instance.Instance, instance)",
"def find_object(self, obj_type, obj_name):\n try:\n # Simply look it up by type and name.\n obj = self.model_map['object'][obj_type][obj_name][1]\n except KeyError:\n # No dice. This object doesn't exist in the model.\n obj = None\n\n return obj",
"def first_one(self, notfound=exc.NoResultFound):\n ret = self.first()\n if ret is None and notfound is not None:\n raise notfound\n return ret"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a single instance | def get_instance(self, instance):
        return self._get(_instance.Instance, instance) | [
"def _get_instance(self, id):\n if id not in self._instances:\n self._instances[id] = self._load_constructor(id)\n\n return self._instances[id]",
"def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance",
"def get(cls):\n return cls.instance",
"def _get_instance(self):",
"def get_instance(self, name_or_id):\n try:\n return self.get_instance_by_name(name_or_id)\n except KeyError:\n pass\n\n try:\n return self.get_instance_by_dns(name_or_id)\n except KeyError:\n pass\n\n return self.get_instance_by_id(name_or_id)",
"def get_instance_by_id(self, id):\n try:\n path = (\"/v1/instances/{}?version={}&generation={}\".format(\n id, self.cfg[\"version\"], self.cfg[\"generation\"]))\n\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching instance with ID {}. {}\".format(id, error))\n raise",
"def get():\r\n if cls.__singleton__ is None:\r\n raise 'Error, no singleton instance to retrieve'\r\n else:\r\n return cls.__singleton__",
"def get_instance(cls):\n # Singleton class.\n try:\n return _host_instances[cls]\n except KeyError:\n _host_instances[cls] = cls()\n return _host_instances[cls]",
"def get_object(self):\n queryset = self.get_queryset() # 获取查询集\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key] # 获取查询参数值\n try:\n instance = queryset.get(id=id) # 获取当前实例\n return instance # 实例存在则返回实例\n except models.ObjectDoesNotExist: # 捕捉实例不存在异常\n raise Http404('No object found.') # 抛出404异常响应\n raise Http404('No object found.') # 若遍历所以参数都未捕捉到值,则抛出404异常响应",
"def get_instance(self, name):\n return self.store.instance.id",
"def instance(self):\n return self.__instance",
"def get_instance(self, name):\n return self.website.instance.id",
"def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]",
"def getInstance():\n if Car.inst is None: Car.inst = Car()\n return Car.inst",
"def get_instance_by_name(self, name):\n try:\n data = self.get_instances()\n if \"errors\" in data:\n return data\n\n for instance in data[\"instances\"]:\n if instance[\"name\"] == name:\n return instance\n\n return resource_not_found()\n\n except Exception as error:\n print(\"Error fetching instance with name {}. {}\".format(\n name, error))\n raise",
"def get_instance_by_name(self, name):\n instances = self.list_instances()\n instance = [i for i in instances if i.name == name]\n if not instance:\n raise KeyError(\"No instances returned with name %s\" % name)\n return instance[0]",
"def get_by_name(instance_name):\n return Instance.get_by_name(instance_name)",
"def getRandomInstance(self):\n return self._getInstance(RandomInstanceCreator)",
"def test_get_instance(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a user to 'prospects' unless the user is the campaign owner or is already linked to 'workers', 'prospects', or 'blacklist'. Also decline to add prospects when the campaign is not active. user A TcsUser instance to link to 'prospects' | def addProspect(self, user):
        if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \
                and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():
            self.prospects.add(user)
            return self
        return None | [
"def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None",
"def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None",
"def add_propect(self, prospect: ProspectSchema):\n self.__prospects.append(prospect)",
"def testSequenceProspectiveAdds(self):\r\n # Share to prospective user + non-prospective user.\r\n self._tester.AddFollowers(self._cookie, self._vp_id,\r\n ['Email:test@test.com', self._user3.user_id])\r\n self.assertEqual(len(TestEmailManager.Instance().emails['user3@emailscrubbed.com']), 1)\r\n\r\n # Share to prospective user again, but not to non-prospective user (no email should be sent 2nd time).\r\n self._tester.AddFollowers(self._cookie, self._vp_id,\r\n ['Email:test@test.com'])\r\n self.assertEqual(len(TestEmailManager.Instance().emails['user3@emailscrubbed.com']), 1)",
"def add_player(self, user):\n # Make sure the user can play\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n raise ValueError(\"Not enough credits to pay entrance fee.\")\n if self.is_user_playing(user):\n raise ValueError(\"User already in tournament.\")\n \n # Handle the money transfer to join the tournament\n user_profile.credits = user_profile.credits - self.entrance_fee\n user_profile.save()\n self.prize_pool = self.prize_pool + self.entrance_fee\n self.save()\n \n # Join the tournament\n new_player = Player(user=user,\n tournament=self,\n credits=self.starting_credits)\n new_player.save()\n return True",
"def _CreateProspective(self):\r\n self._new_user, _ = yield User.CreateProspective(self._client,\r\n self._new_user_id,\r\n self._webapp_dev_id,\r\n self._identity_key,\r\n self._op.timestamp)\r\n\r\n # If system user is defined, then create the welcome conversation.\r\n # For now, add a check to ensure the welcome conversation is not created in production.\r\n if system_users.NARRATOR_USER is not None:\r\n # Checkpoint the allocated asset id range used to create the welcome conversation.\r\n if self._op.checkpoint is None:\r\n # NOTE: Asset ids are allocated from the new user's ids. This is different than the\r\n # usual practice of allocating from the sharer's ids. \r\n self._unique_id_start = yield gen.Task(User.AllocateAssetIds,\r\n self._client,\r\n self._new_user_id,\r\n CreateProspectiveOperation._ASSET_ID_COUNT)\r\n\r\n checkpoint = {'id': self._unique_id_start}\r\n yield self._op.SetCheckpoint(self._client, checkpoint)\r\n else:\r\n self._unique_id_start = self._op.checkpoint['id']\r\n\r\n yield self._CreateWelcomeConversation()\r\n\r\n # Add an analytics entry for this user.\r\n analytics = Analytics.Create(entity='us:%d' % self._new_user_id,\r\n type=Analytics.USER_CREATE_PROSPECTIVE,\r\n timestamp=self._op.timestamp,\r\n payload=self._reason)\r\n yield gen.Task(analytics.Update, self._client)\r\n\r\n yield Operation.TriggerFailpoint(self._client)",
"def check_professor(doc_user):\n info = doc_user[\"user_info\"]\n my_sharing_calendar = col_sharing.find_one({\"User\": doc_user[\"_id\"]})\n if info[\"professor\"]:\n logger.info('{}: sharing calendar start'.format(\n doc_user[\"user_id\"]))\n my_sharing_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_sharing.insert_one(my_sharing_calendar)\n return True\n \n return False",
"def ValidateCreateProspectiveUsers(self, op_dict, contacts):\r\n users = []\r\n for contact_dict in contacts:\r\n # Look up user in the model.\r\n if 'user_id' in contact_dict:\r\n users.append(self.GetModelObject(User, contact_dict['user_id']))\r\n else:\r\n # Look up identity and user in db in order to get various server-generated ids.\r\n identity_key = contact_dict['identity']\r\n actual_ident = self._RunAsync(Identity.Query, self.client, identity_key, None)\r\n actual_user = self._RunAsync(User.Query, self.client, actual_ident.user_id, None)\r\n\r\n # Determine whether the op is causing a new prospective user to be created.\r\n expected_user = self.GetModelObject(User, actual_user.user_id, must_exist=False)\r\n\r\n if expected_user is None:\r\n user_dict = {'user_id': actual_user.user_id,\r\n 'webapp_dev_id': actual_user.webapp_dev_id}\r\n identity_type, value = identity_key.split(':', 1)\r\n if identity_type == 'Email':\r\n user_dict['email'] = value\r\n elif identity_type == 'Phone':\r\n user_dict['phone'] = value\r\n ident_dict = {'key': identity_key, 'authority': 'Viewfinder'}\r\n self.ValidateUpdateUser('create prospective user',\r\n op_dict,\r\n user_dict,\r\n ident_dict,\r\n device_dict=None,\r\n is_prospective=True)\r\n\r\n analytics = Analytics.Create(entity='us:%d' % actual_user.user_id,\r\n type=Analytics.USER_CREATE_PROSPECTIVE)\r\n self.ValidateCreateDBObject(Analytics, **analytics._asdict())\r\n\r\n users.append(actual_user)\r\n\r\n return users",
"def testProspectiveUser(self):\r\n self._CreateSimpleTestAssets()\r\n new_user, _, _ = self._CreateProspectiveUser()\r\n users = self._Resolve(['Email:prospective@emailscrubbed.com'])\r\n self.assertEqual(users, [{'identity': 'Email:prospective@emailscrubbed.com',\r\n 'user_id': 5,\r\n 'labels': []}])\r\n\r\n # Activate the user.\r\n self._UpdateOrAllocateDBObject(User,\r\n user_id=new_user.user_id,\r\n labels=[User.REGISTERED])\r\n users = self._Resolve(['Email:prospective@emailscrubbed.com'])\r\n self.assertEqual(users, [{'user_id': new_user.user_id,\r\n 'identity': 'Email:prospective@emailscrubbed.com',\r\n 'labels': [User.REGISTERED]}])",
"def add_talk(talk):\n # Check if this user is already registered\n exists = check_attendee_exists(talk.userId, talk.profile)\n if not exists[0]:\n return False\n\n talk.put()\n return True",
"def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)",
"def _CreateProspectiveUser(self):\r\n assert getattr(self, '_episode_id'), 'call _CreateSimpleTestAssets first'\r\n vp_id, ep_ids = self._tester.ShareNew(self._cookie,\r\n [(self._episode_id, self._photo_ids)],\r\n ['Email:prospective@emailscrubbed.com'])\r\n\r\n identity = self._RunAsync(Identity.Query, self._client, 'Email:prospective@emailscrubbed.com', None)\r\n return self._RunAsync(User.Query, self._client, identity.user_id, None), vp_id, ep_ids[0]",
"def add_professor(current_user, course_id):\n # retrieve course and check if valid\n course = CourseModel.get_course_by_uuid(course_id)\n if not course:\n return custom_response({'error': 'course_id does not exist'}, 400)\n\n # check permissions\n if not current_user in course.professors:\n return custom_response({'error': 'permission denied'}, 400)\n\n req_data = request.get_json()\n new_professor_netId = req_data.get(\"netId\")\n\n # check if professor already part of course\n new_professor = ProfessorModel.get_professor_by_netId(new_professor_netId)\n if new_professor in course.professors:\n return custom_response({'error': 'already teaching this course'}, 400)\n\n # append new prof to the course's list of professors\n course.professors.append(new_professor)\n db.session.commit()\n\n return custom_response({'message': 'professor added to course', 'netId': new_professor_netId}, 201)",
"def create_users(cls):\n for p in Player.objects.exclude(race__can_play=False):\n p.get_extension(GrandChallengeUser)",
"def create_user_student_or_prof(sender, instance, created, **kwargs):\n if created:\n if not instance.is_staff:\n Student.objects.create(user=instance)\n elif not instance.is_superuser:\n Professor.objects.create(user=instance)",
"def test_add_professional_to_pool(self, instance_obj, session):\n professionals = models.Professional.query().all()\n assert len(professionals) == 3\n assert len(instance_obj.professionals) == 0\n\n for prof in professionals:\n instance_obj.professionals.append(prof)\n assert prof in instance_obj.professionals\n assert instance_obj in prof.pools\n\n assert len(instance_obj.professionals) == 3",
"def create_profile_of_user(sender, instance, created, **kwargs):\n if created:\n RevolvUserProfile.objects.get_or_create(user=instance)",
"def test_add_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_ADD_COACH, self.classrooms[1]))",
"def addParticipant(self, participant):\n if len(self.participants) < self.maxParticipants:\n self.participants[participant.discordId] = participant\n else:\n raise ValueError('Max number of participants has been reached')"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
Remove the user from the lists of workers and prospects, if applicable, and add the user to the blacklist. Note that adding somebody as a worker removes the person from the blacklist. Parameters: user -- a TcsUser instance to link to the blacklist. | def addToBlacklist(self, user):
if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():
self.blacklist.add(user)
if self.prospects.filter(pk=user.id).exists():
self.prospects.remove(user)
if self.workers.filter(pk=user.id).exists():
self.workers.remove(user)
return self
return None | [
"async def blacklist(\n self, ctx: commands.Context, user: Union[ConvertUserAPI, discord.Member] = None\n ):\n if user is None:\n return await ctx.send_help()\n\n guild = ctx.guild\n async with self.config.guild(guild).blacklist() as data:\n if user.id not in data:\n data.append(user.id)\n await ctx.maybe_send_embed(\n _(\"{user} ({user.id}) will always be kicked on rejoin\").format(user=user)\n )\n else:\n data.remove(user.id)\n await ctx.maybe_send_embed(\n _(\n \"{user} ({user.id}) will be checked on new joins by the automated system\"\n ).format(user=user)\n )",
"async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id in self.bot.blacklist:\r\n self.bot.blacklist.remove(user.id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(\"This user wasn't even blacklisted.\")",
"async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")",
"async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.bot.blacklist:\r\n self.bot.blacklist.append(user.id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(user.name + \"#\" + user.discriminator + \" (\" +\r\n str(user.id) + \") is already blacklisted.\")",
"async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")",
"def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None",
"async def _blacklist_remove(self, ctx, users: commands.Greedy[discord.Member]):\n query = 'UPDATE users SET blacklisted = false WHERE user_id = $1'\n\n for user in users:\n await self.bot.cursor.execute(query, user.id)\n\n await ctx.send(f'✅ Successfully removed **{\", \".join(str(x) for x in users)}** from blacklist.')\n await self.bot.user_cache.refresh()",
"async def remove_blacklist_id(self, ctx, user_id: int):\r\n if user_id in self.bot.blacklist:\r\n self.bot.blacklist.remove(user_id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(\"This ID wasn't even in the blacklist.\")",
"async def blacklist(self, ctx, *, user_id: int):\n user = self.bot.get_user(user_id)\n if user is None:\n await self.bot.send(ctx, ':warning: I couldn\\'t find that user.')\n return\n\n if user_id in self.bot.blacklist:\n await self.bot.send(ctx, ':warning: That user has alreaady been blacklisted!')\n\n self.bot.blacklist.append(user_id)\n with open('data/gblacklist.txt', 'a') as blacklist_file:\n blacklist_file.write(str(user_id) + '\\n')\n await self.bot.send(ctx, f':white_check_mark: **{ctx.message.author.name}**' +\n f', I\\'ve blacklisted `{user}` from using Lilac commands!')\n\n await self.bot.send(user, 'You\\'ve been blacklisted from using Lilac commands globally.' +\n ' Contact one of the devs to appeal.')",
"async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")",
"def remove_users(self, userIds=[\"375805\", \"1971259\", \"1991105\", \"1961142\",\n \"2208193\", \"2154231\", \"2200202\", \"2197351\", \"375570\"]):\n submissions = [x for x in self.submissions if x['userId'] not in userIds]\n self.submissions = submissions",
"async def whitelist(\n self, ctx: commands.Context, user: Union[ConvertUserAPI, discord.Member] = None\n ):\n if user is None:\n return await ctx.send_help()\n\n guild = ctx.guild\n async with self.config.guild(guild).whitelist() as data:\n if user.id not in data:\n data.append(user.id)\n await ctx.maybe_send_embed(\n _(\"{user} ({user.id}) will bypass the automation check\").format(user=user)\n )\n else:\n data.remove(user.id)\n await ctx.maybe_send_embed(\n _(\n \"{user} ({user.id}) will be checked on new joins by the automated system\"\n ).format(user=user)\n )",
"async def add_blacklist_id(self, ctx, user_id: int):\r\n if user_id not in self.bot.blacklist:\r\n self.bot.blacklist.append(user_id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(\"This ID is already in the blacklist.\")",
"def delete_from_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.delete().where(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id )).execute() \n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible",
"def removeWorker(self, user):\n if user == self.owner:\n return None\n # Without these queries, there's no way to tell if anything actually gets removed.\n # Calling remove() on a user that is not in the set does not raise an error.\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n return self\n return None",
"async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")",
"async def whitelist(self, ctx, *, user_id: int):\n user = self.bot.get_user(user_id)\n if user is None:\n await self.bot.send(ctx, ':warning: I couldn\\'t find that user.')\n return\n\n if user_id not in self.bot.blacklist:\n await self.bot.send(ctx, ':warning: That user is not blacklisted!' +\n ' You cannot whitelist someone who has not been blacklisted.')\n return\n\n self.bot.blacklist.remove(user_id)\n with open('data/gblacklist.txt', 'w') as blacklist_file:\n blacklist_file.writelines([str(uid) for uid in self.bot.blacklist])\n\n await self.bot.send(ctx, f':white_check_mark: I\\'ve whitelisted `{user}`.' +\n ' They are now able to use Lilac commands.')\n\n await self.bot.send(user, 'You\\'ve been whitelisted to use Lilac commands, which means you are now ' +\n 'unblacklisted -- you can use Lilac commands.')",
"def add_to_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.insert().values( user_id=user_id,\n blacklisted_id=blacklist_user_id).execute()\n except sqlalchemy.exc.IntegrityError as e:\n if e.orig.args[0] == 1062 :\n # duplicate entry, don't care !\n pass\n elif e.orig.args[0] == 1452 :\n self.log(e, self.identifier)\n raise egg_errors.UnknownUserOrBadgeIDException\n else:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible",
"async def blacklist_add(self, ctx: commands.Context, target, *, reason: str = \"No reason given.\"):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n guild = None\r\n\r\n try:\r\n check = await self.check_user(target.id, table)\r\n except Exception:\r\n guild = discord.utils.get(self.bot.guilds, id=int(target))\r\n if not guild:\r\n return\r\n\r\n check = await self.check_user(int(target), table)\r\n\r\n if not check[0]:\r\n if isinstance(target, discord.User):\r\n await self.add_blacklist(target.id, table, reason)\r\n else:\r\n await self.add_blacklist(int(target), table, reason)\r\n\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n if not isinstance(target, discord.User):\r\n embed = discord.Embed(color=self.bot.colors.red,\r\n description=f\"Your guild / server has been blacklisted. \"\r\n f\"If you wish to know the reason, join the \"\r\n f\"[Support server]({self.bot.invite_url})\")\r\n await guild.owner.send(embed=embed)\r\n await guild.leave()\r\n self.bot.logger.info(f\"Added guild with ID {target} to blacklist.\")\r\n else:\r\n self.bot.logger.info(f\"Added user with ID {target.id} to blacklist\")\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is already blacklisted.\")"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
Remove the user from 'prospects' and 'blacklist', if applicable, and add the user to 'workers'. Note that adding somebody as a worker removes the person from the blacklist. Parameters: user -- a TcsUser instance to link to workers. | def addWorker(self, user):
if (user != self.owner) and not self.workers.filter(pk=user.id).exists():
self.workers.add(user)
if self.prospects.filter(pk=user.id).exists():
self.prospects.remove(user)
if self.blacklist.filter(pk=user.id).exists():
self.blacklist.remove(user)
return self
return None | [
"def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None",
"def removeWorker(self, user):\n if user == self.owner:\n return None\n # Without these queries, there's no way to tell if anything actually gets removed.\n # Calling remove() on a user that is not in the set does not raise an error.\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n return self\n return None",
"def addProspect(self, user):\n if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \\\n and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():\n self.prospects.add(user)\n return self\n return None",
"async def blacklist(\n self, ctx: commands.Context, user: Union[ConvertUserAPI, discord.Member] = None\n ):\n if user is None:\n return await ctx.send_help()\n\n guild = ctx.guild\n async with self.config.guild(guild).blacklist() as data:\n if user.id not in data:\n data.append(user.id)\n await ctx.maybe_send_embed(\n _(\"{user} ({user.id}) will always be kicked on rejoin\").format(user=user)\n )\n else:\n data.remove(user.id)\n await ctx.maybe_send_embed(\n _(\n \"{user} ({user.id}) will be checked on new joins by the automated system\"\n ).format(user=user)\n )",
"async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id in self.bot.blacklist:\r\n self.bot.blacklist.remove(user.id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(\"This user wasn't even blacklisted.\")",
"async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")",
"async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.bot.blacklist:\r\n self.bot.blacklist.append(user.id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(user.name + \"#\" + user.discriminator + \" (\" +\r\n str(user.id) + \") is already blacklisted.\")",
"async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")",
"async def _blacklist_remove(self, ctx, users: commands.Greedy[discord.Member]):\n query = 'UPDATE users SET blacklisted = false WHERE user_id = $1'\n\n for user in users:\n await self.bot.cursor.execute(query, user.id)\n\n await ctx.send(f'✅ Successfully removed **{\", \".join(str(x) for x in users)}** from blacklist.')\n await self.bot.user_cache.refresh()",
"def remove_users(self, userIds=[\"375805\", \"1971259\", \"1991105\", \"1961142\",\n \"2208193\", \"2154231\", \"2200202\", \"2197351\", \"375570\"]):\n submissions = [x for x in self.submissions if x['userId'] not in userIds]\n self.submissions = submissions",
"async def whitelist(\n self, ctx: commands.Context, user: Union[ConvertUserAPI, discord.Member] = None\n ):\n if user is None:\n return await ctx.send_help()\n\n guild = ctx.guild\n async with self.config.guild(guild).whitelist() as data:\n if user.id not in data:\n data.append(user.id)\n await ctx.maybe_send_embed(\n _(\"{user} ({user.id}) will bypass the automation check\").format(user=user)\n )\n else:\n data.remove(user.id)\n await ctx.maybe_send_embed(\n _(\n \"{user} ({user.id}) will be checked on new joins by the automated system\"\n ).format(user=user)\n )",
"def join(self, user):\n self.players.add(user)\n if user.pk not in self.queue:\n self.queue.append(user.pk)\n self.save()",
"def claim_watches(user):\r\n Watch.objects.filter(email=user.email).update(email=None, user=user)",
"async def remove_blacklist_id(self, ctx, user_id: int):\r\n if user_id in self.bot.blacklist:\r\n self.bot.blacklist.remove(user_id)\r\n utils.save_json(self.bot.blacklist, self.bot.blacklist_file_path)\r\n await ctx.channel.send(\"Done.\")\r\n else:\r\n await ctx.channel.send(\"This ID wasn't even in the blacklist.\")",
"def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)",
"def claim_watches(user_id):\n user = get_user_model().objects.get(id=user_id)\n Watch.objects.filter(email=user.email).update(email=None, user=user)",
"def remove_user(self, user, money, betsize, beton):\r\n print(\"removing user \" + user + \" from a room\")\r\n self.self_lock.acquire()\r\n rm = self.rooms[self.users[user]]\r\n rm[0].removeUser(user)\r\n rm[1] -= 1\r\n\r\n self.users.pop(user)\r\n self.self_lock.release()",
"def enqueue_user(self, user_to_add: \"Session\"):\n if user_to_add in self.__users:\n raise DuplicateUser()\n self.__users.append(user_to_add)",
"async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
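The addToBlacklist and addWorker rows above implement the same mutually exclusive membership pattern over Django many-to-many relations (workers, prospects, blacklist). A minimal, framework-free sketch of that pattern follows, using plain Python sets in place of the ManyToMany managers so it runs without a Django project; the class and attribute names are illustrative only.

# Mutually exclusive role membership, mirroring addWorker/addToBlacklist above.
# Plain sets stand in for the Django ManyToMany relations; discard() is the
# set analogue of calling .remove() on a member that may not be present.
class ProjectRoles:
    def __init__(self, owner):
        self.owner = owner
        self.workers = set()
        self.prospects = set()
        self.blacklist = set()

    def add_worker(self, user):
        if user == self.owner or user in self.workers:
            return None
        self.workers.add(user)
        self.prospects.discard(user)   # no-op if the user was not a prospect
        self.blacklist.discard(user)   # hiring clears any blacklisting
        return self

    def add_to_blacklist(self, user):
        if user == self.owner or user in self.blacklist:
            return None
        self.blacklist.add(user)
        self.prospects.discard(user)
        self.workers.discard(user)
        return self


roles = ProjectRoles(owner="alice")
roles.add_worker("bob")
roles.add_to_blacklist("bob")
assert "bob" in roles.blacklist and "bob" not in roles.workers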
Return active constituent voters who have not been contacted since the last election and have not been served to a supporter in the last two days. Don't limit the size of the result set here; let APIs do that. | def getVotersToContact(self):
two_days_ago = date.today() - timedelta(2)
year_ago = date.today() - timedelta(365)
return self.voters.filter(
Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),
Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),
campaignstovoters__is_active=True,
is_active=True) | [
"def get_past_incidents(self):\n return Incident.objects.filter(services__in=self,\n end_date__lte=datetime.now())",
"def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive",
"def current_unavailable_periods_for_reviewers(team):\n today = datetime.date.today()\n\n unavailable_period_qs = UnavailablePeriod.objects.filter(\n Q(end_date__gte=today) | Q(end_date=None),\n Q(start_date__lte=today) | Q(start_date=None),\n team=team,\n ).order_by(\"end_date\")\n\n res = defaultdict(list)\n for period in unavailable_period_qs:\n res[period.person_id].append(period)\n\n return res",
"def user_get_unused_votes_today(self):\n today = datetime.date.today()\n one_day_interval = (today, today + datetime.timedelta(1))\n\n used_votes = Vote.objects.filter(\n user = self,\n voted_at__range = one_day_interval\n ).count()\n\n available_votes = askbot_settings.MAX_VOTES_PER_USER_PER_DAY - used_votes\n return max(0, available_votes)",
"def get_most_active_voters(self):\n\n # determinate statuses of voters by activity in polls\n users_with_active_voters_status = self.model.polls.users_with_active_voters_status()\n\n # filter only active voters\n active_voters = users_with_active_voters_status.filter(is_active_voter=True)\n\n # return active voters in a descending order\n return active_voters.order_by('-count_votes')",
"def getVotersToDial(self):\n return self.getVotersToContact().exclude(\n (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),\n (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))",
"def get_old_entries(self):\n return self.filter(SteamPlayerCount.measured_at < Measurable.past()).all()",
"def get_current_incidents(self):\n return Incident.objects.filter(services=self,\n end_date__gt=datetime.now(),\n end_date=None)",
"def continuing_less_expenses(self):\n log_str = \"continuing_less_expenses() Method.\\n\"\n non_expenses = []\n for i in self.available_cand:\n if not i.return_expenses:\n non_expenses.append(i)\n log_str += \"{} is below expenses quota.\\n\".format(i.name)\n\n self.write_log(log_str)\n return non_expenses",
"def getUnconfirmedVolunteers(self, query):\n query = Volunteer.query(Volunteer.confirmed == False)\n return query",
"async def get_non_voters(self, guild: discord.Guild, uservotes: dict):\n\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for member in guild.members:\n if player_role in member.roles:\n userkey = f\"{member.name}#{member.discriminator}\"\n if userkey not in uservotes:\n uservotes[userkey] = \"No vote\"\n\n return uservotes",
"def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')",
"def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers",
"def most_active_clients(self):\n\n client_dict = {}\n for client in self.client_repo.list:\n client_dict[client.id] = 0\n\n for rental in self.rental_repo.list:\n if rental.rented_date is not None:\n key = rental.client_id\n client_dict[key] += int((rental.returned_date - rental.rented_date).days)\n\n result = []\n for key in client_dict:\n result.append(MovieRentedDays(self.client_repo.find_client(key).name, client_dict[key]))\n result.sort(key=lambda x: x.rented_days, reverse=True)\n return result",
"def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None",
"def _findAllAvailabilities(self):\n # find availabilities for each user\n for x in self.users:\n if not x.findAvailability(self.start, self.stop):\n print \"Error occurred in finding availability.\"\n\n # add the availability for first user\n for i in self.users[0].availabilities['times']:\n self.availableTimes.append(i)\n\n # trim times that are not listed in each individual persons availability\n # list comprehension\n for i in range(1, len(self.users)):\n self.availableTimes = [elem for elem in self.availableTimes if elem in self.users[i].availabilities['times']]",
"def find_outdated_game_dates(self):\n state = 'preview.gameData.status.detailedState'\n old = self._db.Games.find({state : {'$nin' : ['Final']}})\n return set([x['date'] for x in old])",
"def dead_mates(player):\n return _get_available_mates(player, status=-1)",
"def get_players_not_in_tournament(self):\r\n\r\n players = self.base_ctrl.get_all_players()\r\n tournament_players = [player[0] for player in self.tournament_model.players]\r\n players_not_in = [player for player in players if player not in tournament_players]\r\n\r\n return players_not_in"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
Return active constituent voters with valid phone contact information who have not been contacted since the last election. Don't limit the size of the result set here; let APIs do that. | def getVotersToDial(self):
return self.getVotersToContact().exclude(
(Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),
(Q(phone_number2='') | Q(wrong_phone_number2__gt=1))) | [
"def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)",
"def get_active_contact_no_subscriber(self):\n # The list of active contacts that doesn't\n # exist in SMSCampaignSubscriber\n\n #TODO : This might kill performance on huge phonebook...\n query = \\\n 'SELECT dc.id, dc.phonebook_id, dc.contact, dc.last_name, \\\n dc.first_name, dc.email, dc.city, dc.description, \\\n dc.status, dc.additional_vars, dc.created_date, dc.updated_date \\\n FROM dialer_contact as dc \\\n INNER JOIN dialer_phonebook ON \\\n (dc.phonebook_id = dialer_phonebook.id) \\\n INNER JOIN sms_campaign_phonebook ON \\\n (dialer_phonebook.id = sms_campaign_phonebook.phonebook_id) \\\n WHERE sms_campaign_phonebook.smscampaign_id = %s \\\n AND dc.status = 1 \\\n AND dc.id NOT IN \\\n (SELECT sms_campaign_subscriber.contact_id \\\n FROM sms_campaign_subscriber \\\n WHERE sms_campaign_subscriber.sms_campaign_id = %s)' % \\\n (str(self.id), str(self.id),)\n\n raw_contact_list = Contact.objects.raw(query)\n return raw_contact_list",
"def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts",
"def get_all_contacts_since(cls, date, with_full_data=True):\n select = 1000\n skip = 0\n all_contacts = []\n\n contacts = cls.get_contacts_since(date, with_full_data, select, skip)\n num_of_entries = len(contacts)\n while num_of_entries > 0:\n all_contacts.extend(contacts)\n if num_of_entries < select:\n break\n\n skip += select\n contacts = cls.get_contacts_since(\n date, with_full_data, select, skip)\n num_of_entries = len(contacts)\n\n return all_contacts",
"def get_most_active_voters(self):\n\n # determinate statuses of voters by activity in polls\n users_with_active_voters_status = self.model.polls.users_with_active_voters_status()\n\n # filter only active voters\n active_voters = users_with_active_voters_status.filter(is_active_voter=True)\n\n # return active voters in a descending order\n return active_voters.order_by('-count_votes')",
"def sms_eligible(elders):\n return elders.filter(\n declined=False,\n user__is_active=True,\n phone__isnull=False,\n )",
"def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)",
"def scraped_candidates(self):\n from calaccess_processed.models import ScrapedCandidateProxy\n filer_ids = [i.identifier for i in self.identifiers.filter(scheme=\"calaccess_filer_id\")]\n return ScrapedCandidateProxy.objects.filter(scraped_id__in=filer_ids).order_by(\"-election\")",
"def filter_contacts_unprocessed(contacts):\n return [c for c in contacts if c[ISPROCESSED_FIELD_ID] != ISPROCESSED_FIELD_YES]",
"def dead_mates(player):\n return _get_available_mates(player, status=-1)",
"def get_recent_contacts(user, limit=5, timespan_days=14) -> typing.List[Contact]:\n timespan_recent = datetime.now().astimezone() - timedelta(days=timespan_days)\n contacts_recent = (\n Contact.objects.filter(interactions__was_at__gt=timespan_recent)\n .filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_recent)",
"def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])",
"def get_last_conversations(self):\n email_token = auth.current_user()[0]\n user_data, last_messages = self.friend_database.get_conversations(email_token)\n last_messages = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in last_messages]\n for i in range(len(last_messages)):\n last_messages[i][\"timestamp\"] = last_messages[i][\"timestamp\"].isoformat()\n response = []\n for i in range(len(last_messages)):\n response.append({\"user\": user_data[i], \"last_message\": last_messages[i]})\n return json.dumps(response), 200",
"def get_past_incidents(self):\n return Incident.objects.filter(services__in=self,\n end_date__lte=datetime.now())",
"def recruitable_mates(player):\n return _get_available_mates(player, status=3)",
"def _GatherExistingContacts(self):\r\n self._all_contacts_dict, self._contacts_to_delete = \\\r\n yield FetchContactsOperation._GetAllContactsWithDedup(self._client, self._user_id)\r\n\r\n for contact in self._all_contacts_dict.itervalues():\r\n # Compute total count of present and removed contacts to be used for enforcing Contact.MAX_CONTACTS_LIMIT\r\n # and determining if a removed contacts reset is needed.\r\n if contact.IsRemoved():\r\n self._all_removed_contacts_count += 1\r\n else:\r\n self._all_present_contacts_count += 1\r\n\r\n # Create contact dict from contact list filtered for the appropriate contact source.\r\n if ((self._identity.authority == 'Facebook' and contact.contact_source == Contact.FACEBOOK) or\r\n (self._identity.authority == 'Google' and contact.contact_source == Contact.GMAIL)):\r\n self._existing_contacts_dict[contact.contact_id] = contact",
"def ongoing_competitions():\n now = timezone.now()\n return Competition.objects.filter(start__lte=now, end__gte=now)",
"def get_past_meeting_participants(self, meeting_uuid: str) -> list:\n\n logging.info(\"Gathering Zoom meeting participant data...\")\n # Note: artificial rate limit\n # more detail can be found here:\n # https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits\n @sleep_and_retry\n @limits(calls=39, period=5)\n def make_requests(\n page_number: int = 1, next_page_token: str = None, result_list: list = None\n ) -> list:\n logging.info(\"Making meeting partcipants request %s\", page_number)\n\n result = self.zoom.do_request(\n \"get\",\n \"metrics/meetings/\" + meeting_uuid + \"/participants\",\n {\"type\": \"past\", \"page_size\": 300, \"next_page_token\": next_page_token},\n )\n\n page_number += 1\n\n if \"participants\" in result.keys():\n result_list += result[\"participants\"]\n else:\n result_list += [\n {\"error_code\": result[\"code\"], \"error\": result[\"message\"]}\n ]\n\n if \"code\" in result.keys():\n logging.error(\"Error: %s %s\", result[\"code\"], result[\"message\"])\n\n if \"next_page_token\" in result.keys() and result[\"next_page_token\"] != \"\":\n make_requests(\n page_number=page_number,\n next_page_token=result[\"next_page_token\"],\n result_list=result_list,\n )\n\n return result_list\n\n result_list = make_requests()\n\n return result_list",
"def get_allowed_vos():\n return get_vos(filter_by_existing_users(filter_out_bans(read_mapfiles(), read_banfile())))"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
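The two voter querysets above combine Q objects with chained filter()/exclude() calls. As a readability aid, here is a framework-free sketch of the predicate they encode, written as plain functions over per-voter values; the field names are taken from the snippets, and the 365-day window simply mirrors the code rather than any confirmed election calendar.

from datetime import date, timedelta

def is_contactable(last_served, last_contacted, is_active=True, today=None):
    # Mirrors getVotersToContact: never served or served more than two days
    # ago, and never contacted or contacted more than a year ago.
    today = today or date.today()
    two_days_ago = today - timedelta(days=2)
    year_ago = today - timedelta(days=365)
    not_recently_served = last_served is None or last_served < two_days_ago
    not_recently_contacted = last_contacted is None or last_contacted < year_ago
    return is_active and not_recently_served and not_recently_contacted

def has_usable_phone(voter):
    # Mirrors the exclude() in getVotersToDial: a voter is dropped only when
    # *both* numbers are blank or repeatedly reported wrong, so one usable
    # number is enough to keep the voter dialable.
    bad1 = voter.get("phone_number1", "") == "" or voter.get("wrong_phone_number1", 0) > 1
    bad2 = voter.get("phone_number2", "") == "" or voter.get("wrong_phone_number2", 0) > 1
    return not (bad1 and bad2)

assert is_contactable(None, None)
assert not is_contactable(date.today(), None)
assert has_usable_phone({"phone_number1": "555-0100"})
assert not has_usable_phone({"phone_number1": "", "phone_number2": "", "wrong_phone_number2": 2})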
Remove the user from 'workers' or 'prospects', if applicable. Parameters: user -- a TcsUser instance to remove from workers. | def removeWorker(self, user):
if user == self.owner:
return None
# Without these queries, there's no way to tell if anything actually gets removed.
# Calling remove() on a user that is not in the set does not raise an error.
if self.workers.filter(pk=user.id).exists():
self.workers.remove(user)
return self
if self.prospects.filter(pk=user.id).exists():
self.prospects.remove(user)
return self
return None | [
"def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None",
"def removeUser(self, user):\n if self.inRoster(user):\n del self.roster[user.nick.lower()]",
"def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)",
"def remove_user(self, user):\n if not isinstance(user, User):\n raise UserError\n\n if self.users.get(user.name):\n del self.users[user.name]",
"def deep_user_remove(self, username): \n self.remove_user_from_escalations(username)\n self.remove_user_from_schedules(username)\n self.remove_user_from_teams(username)\n self.delete_user(username)",
"def remove_user(self, user, money, betsize, beton):\r\n print(\"removing user \" + user + \" from a room\")\r\n self.self_lock.acquire()\r\n rm = self.rooms[self.users[user]]\r\n rm[0].removeUser(user)\r\n rm[1] -= 1\r\n\r\n self.users.pop(user)\r\n self.self_lock.release()",
"def remove_users(self, userIds=[\"375805\", \"1971259\", \"1991105\", \"1961142\",\n \"2208193\", \"2154231\", \"2200202\", \"2197351\", \"375570\"]):\n submissions = [x for x in self.submissions if x['userId'] not in userIds]\n self.submissions = submissions",
"def user_delink(self, user):\n index = self.context.trainers.index(self.request.user)\n self.context.trainers.pop(index)\n self.context.save()\n return \"OK\"",
"def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)",
"def remove_cc(self, user):\n if not self.ticket_id:\n logging.error(\"No ticket ID associated with ticket object. Set ticket ID with set_ticket_id(ticket_id)\")\n return\n\n if isinstance(user, list):\n params = {'cc': {'remove': user}}\n else:\n params = {'cc': {'remove': [user]}}\n\n if self.token:\n params['token'] = self.token\n\n # Attempt to edit ticket.\n try:\n r = self.s.put(\"{0}/{1}\".format(self.rest_url, self.ticket_id), json=params)\n r.raise_for_status()\n if 'bugs' in r.json():\n if r.json()['bugs'][0]['changes'] == {}:\n logging.error(\"No changes made to ticket. Possible invalid field or lack of change in field.\")\n return\n if 'message' in r.json():\n logging.error(r.json()['message'])\n return\n logging.debug(\"Removing user(s) from cc list: Status Code: {0}\".format(r.status_code))\n logging.info(\"Removing user(s) from cc list {0} - {1}\".format(self.ticket_id, self.ticket_url))\n except requests.RequestException as e:\n logging.error(\"Error removing user(s) from cc list\")\n logging.error(e.args[0])",
"def deluser(self, user: 'User'):\n try:\n del self.memberships[user.nick]\n except KeyError:\n self.connection.log(\"Attempted to remove a non-existent user from a channel\")",
"def unassign(self, user):\n db.run_in_transaction(self._unassignTx, user)",
"def _purge_user(self, user):\n self.user_order.remove(user)\n del self.user_queue[user]\n del self.user_skip[user]",
"def test_teams_remove_user_from_team_v1(self):\n pass",
"def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network",
"def test_teams_remove_user_from_team_v2(self):\n pass",
"def remove_user(self, user_name):\n return self.users.pop(user_name.lower(), None)",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def remove_user(self, user):\n\n data = user.to_json()\n key = \"%s:%s\" % (self.channel_id, user.username)\n\n logging.info(data)\n # remove our users timestamp\n affected = self.redis_server.zrem(ENVIRONMENT['REDIS_PREFIX'] + 'users_timestamp',key)\n logging.info(\"removed user timestamp(%d): %s\" % (affected, key))"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
Return the number of voters a user has contacted for the campaign. | def voterContactCount(self, user):
return self.votercontact_set.filter(user=user).count() | [
"def number_of_volunteers(self):\n return self._number_of_volunteers",
"def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents",
"def nay_voter_cnt(self):\n\n return len(self._nay_voters())",
"def get_votecount(self):\r\n countlist = SongVote.objects.filter(user=self.user)\r\n return len(countlist);",
"def guestCount(self):\n return len( self.guests )",
"def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])",
"def abstain_voter_cnt(self):\n\n return len(self._abstain_voters())",
"def prepare_viewers_count(self, obj):\n if hasattr(obj, 'raccess'):\n return obj.raccess.view_users.all().count()\n else:\n return 0",
"def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)",
"def number_of_users(self) -> NumberOfUsers:\n return self._number_of_users",
"async def _vote_count(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await self.get_vote_channel(guild)\n if isinstance(channel, str):\n return await ctx.send(channel)\n\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"I couldn't identify a voting channel. Please specify one explicitly.\"\n ))\n else:\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"That channel has too many messages!\"\n \" Please ask a host for manual vote count.\"\n ))\n\n if len(history) < 1:\n return await ctx.send(_(\"{} is empty.\").format(channel.mention))\n\n user_votes = {}\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for message in history:\n author = message.author\n if player_role not in author.roles:\n continue\n vote = self.get_vote_from_message(message)\n if not vote:\n continue\n user_votes[f\"{author.name}#{author.discriminator}\"] = vote\n\n user_votes = await self.get_non_voters(guild, user_votes)\n\n votes = {}\n for user in user_votes:\n val = user_votes[user].capitalize()\n try:\n votes[val].append(user)\n except KeyError:\n votes[val] = [user]\n\n # max votes first\n votes = dict(sorted(\n votes.items(), key=lambda item: len(item[1]), reverse=True\n ))\n\n # Pop and add stuff back to dict for ordering purpose.\n try:\n votes[\"VTNL\"] = votes.pop(\"Vtnl\")\n except KeyError:\n pass\n try:\n votes[\"No vote\"] = votes.pop(\"No vote\")\n except KeyError:\n pass\n\n txt = \"\"\n\n for i, vote in enumerate(votes, start=1):\n voters = votes[vote]\n\n if vote == \"VTNL\":\n txt += _(\"\\n\\n**{}** - {} ({})\").format(vote, len(voters), \", \".join(voters))\n elif vote == \"No vote\":\n txt += _(\"\\n\\n**Not voting** - {} ({})\").format(len(voters), \", \".join(voters))\n else:\n txt += _(\"\\n{}. **{}** - {} ({})\").format(i, vote, len(voters), \", \".join(voters))\n\n title = _(\"Vote Count\")\n\n embed = discord.Embed(\n color=0x00CDFF, title=title,\n description=_(\"__Counting from {} channel.__\\n\\n{}\").format(\n channel.mention, txt.strip()\n )\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n f\"**{title}**\\n\\n__Counting from {channel.mention}\"\n f\" channel.__\\n\\n{txt.strip()}\"\n )",
"def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])",
"def current_number_of_invaders(self):\n return len(self.invaders)",
"def get_number_of_invitees():\n\n response = send_category_request(\n ACTION_API_URL,\n \"Kategori:Wikipedianer som har fått en inbjudan till fikarummet\"\n )\n pages = response[\"query\"][\"pages\"]\n category_id = list(pages.keys())[0]\n number_of_invitees = pages[category_id][\"categoryinfo\"][\"pages\"]\n return number_of_invitees",
"async def membercount(self):\n await self.bot.say(\"{} has {:,} members!\".format(self.bot.server.name, self.bot.server.member_count))",
"async def command_user_count(client, message):\n server = next(iter(client.servers))\n response = \"**This server has {0} members.**\".format(server.member_count)\n await client.send_message(message.channel, response)",
"async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")",
"def number_users(self):\n return len(self.users)",
"def get_videos_count(self, user_settings=None, user_id=None, login=None, email=None):\n return objects_module.users.get_videos_count(self.khoros_object, user_settings, user_id, login, email)"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
Returns an indented representation of the nested dictionary. | def pretty_repr(self, num_spaces=4):
def pretty_dict(x):
if not isinstance(x, dict):
return repr(x)
rep = ''
for key, val in x.items():
rep += f'{key}: {pretty_dict(val)},\n'
if rep:
return '{\n' + _indent(rep, num_spaces) + '}'
else:
return '{}'
return f'FrozenDict({pretty_dict(self._dict)})' | [
"def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output",
"def pretty(d, indent=0):\r\n for key, value in d.items():\r\n depth = 0\r\n print('\\t' * indent + str(key)+':'),\r\n if isinstance(value, dict):\r\n if depth == 0:\r\n print(\" \")\r\n depth+=1\r\n pretty(value, indent+1)\r\n else:\r\n print(' ' + str(value))",
"def show_nest(p:PropertyDict, nest_level=0, indent_len:Optional[int]=None, pd_header=\"<PropertyDict>\"):\n _max_len = max(map(len, p.keys()))\n max_len = _max_len + nest_level * (_max_len if indent_len is None else indent_len)\n delimiter = \"\\n \"\n def showitem(v):\n if isinstance(v, Tensor):\n return tensor_utils.show(v)\n elif isinstance(v, dict):\n return \"dict({})\".format(\", \".join([\"{}={}\".format(k, showitem(v)) for k, v in v.items()]))\n else:\n return repr(v)\n\n unnested = dict(filter(lambda kv: not isinstance(kv[1], PropertyDict), p.items()))\n unnested_str = delimiter.join([\n *[(\"{:>\"+ str(max_len)+ \"}: {}\").format(k, showitem(v)) for k, v in unnested.items()\n ]\n ])\n\n nested = dict(filter(lambda kv: isinstance(kv[1], PropertyDict), p.items()))\n nested_str = delimiter.join([\n *[(\"{:>\"+ str(max_len)+ \"}: {}\").format(k + pd_header, \"\\n\"+show_nest(v, nest_level=nest_level+1)) for k, v in nested.items()\n ]\n ])\n\n return unnested_str + delimiter + nested_str",
"def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)",
"def nestedDictPrinter(dictName):\n for dictName, key in dictName.items():\n \"\"\"loops through the keys in the nested dictionary, dictName, and then\n prints the nested dictionary names along with its values.\n \"\"\"\n print(dictName.title() + \":\") # prints nested dictionary name\n for items in key:\n \"\"\"loops through nested dictionary key values and and prints the\n key along with the value with a tab before.\n \"\"\"\n print(f\"\\t{items.title()} - {key[items]}\")",
"def pretty_print(dictionary: dict):\n return json.dumps(dictionary, indent=4)",
"def repr_dict(dct, indent):\n # pprint represents OrderedDict objects using the tuple init syntax,\n # which is not very readable. Therefore, dictionaries are iterated over.\n if dct is None:\n return 'None'\n if not isinstance(dct, Mapping):\n raise TypeError(\"Object must be a mapping, but is a {}\"\n .format(type(dct)))\n if isinstance(dct, OrderedDict):\n kind = 'ordered'\n ret = '%s {\\n' % kind # non standard syntax for the kind indicator\n for key in six.iterkeys(dct):\n value = dct[key]\n ret += _indent('{!r}: {!r},\\n'.format(key, value), 2)\n else: # dict\n kind = 'sorted'\n ret = '%s {\\n' % kind # non standard syntax for the kind indicator\n for key in sorted(six.iterkeys(dct)):\n value = dct[key]\n ret += _indent('{!r}: {!r},\\n'.format(key, value), 2)\n ret += '}'\n ret = repr_text(ret, indent=indent)\n return ret.lstrip(' ')",
"def deep_hash_to_text(d,**kwargs):\n\n\tspacer = kwargs.get('spacer',' ')\n\tlines = kwargs.get('lines',[])\n\tindent = kwargs.get('indent',0)\n\tfor key,value in d.iteritems():\n\t\tlines.append(spacer*indent+str(key))\n\t\tif isinstance(value,dict): deep_hash_to_text(value,indent=indent+1,lines=lines,spacer=spacer)\n\t\telse: lines.append(spacer*(indent+1)+str(value))\n\treturn '\\n'.join(lines)",
"def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])",
"def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))",
"def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))",
"def tree_view(dictionary, level=0, sep=\"| \"):\n return \"\".join([\"{0}{1}\\n{2}\".format(sep * level, k,\n tree_view(v, level + 1, sep=sep) if isinstance(v, dict)\n else \"\") for k, v in dictionary.items()])",
"def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)",
"def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))",
"def _str_indented(self, depth: int = 0) -> str:\r\n if self.is_empty():\r\n return ''\r\n else:\r\n s = ' ' * depth + f'{self.value} ({self.weight})\\n'\r\n for subtree in self.subtrees:\r\n s += subtree._str_indented(depth + 1)\r\n return s",
"def print_dict(d, wrap=0):\n pt = PrettyTable(['Property', 'Value'],\n caching=False, print_empty=False)\n pt.aligns = ['l', 'l']\n for (prop, value) in six.iteritems(d):\n if value is None:\n value = ''\n value = _word_wrap(value, max_length=wrap)\n pt.add_row([prop, value])\n # encoded = encodeutils.safe_encode(pt.get_string(sortby='Property'))\n # if six.PY3:\n # encoded = encoded.decode()\n # print(encoded)\n # print(pt)\n return pt",
"def _str_indented(self, depth: int) -> str:\n if self.is_empty():\n return ''\n else:\n answer = depth * ' ' + str(self._root) + '\\n'\n answer += self._left._str_indented(depth + 1)\n answer += self._right._str_indented(depth + 1)\n return answer",
"def print_structure_dict(dict_in, prefix=''):\n for crrt_key in dict_in.keys():\n print(prefix + \"-- \" + crrt_key)\n\n if isinstance(dict_in[crrt_key], dict):\n print_structure_dict(dict_in[crrt_key], prefix + \" \")",
"def _walk(self, d, depth=0):\n\n output = ''\n indent = 3\n header_width = 35 - depth*indent\n\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n output += \"\".ljust(depth * indent)+k+'\\n'\n output += self._walk(v, depth + 1)\n else:\n if isinstance(v, np.ndarray):\n # np array or matrix\n shape = v.shape\n if len(shape) == 1:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"array (%d)\" % (v.shape[0]) + '\\n'\n\n elif len(shape) == 2:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"matrix (%d,%d)\" % (v.shape[0], v.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], str):\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : \" + str(item) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], np.ndarray):\n # List of arrays\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n if len(item.shape) == 1:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : array (%d)\" % (item.shape[0]) + '\\n'\n\n elif len(item.shape) == 2:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : matrix (%d,%d)\" % (item.shape[0], item.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], dict):\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent) + \"[\"+str(item_id)+\"]\" + '\\n'\n output += self._walk(item, depth + 2)\n\n else:\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : \" + str(v) + '\\n'\n\n return output"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
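The pretty_repr row above leans on an _indent helper that is not part of the snippet. Below is a standalone sketch of the same recursive formatting, with a minimal assumed _indent, so the output shape can be inspected outside the original codebase.

def _indent(text, num_spaces):
    # Assumed behaviour of the helper: prefix every line with num_spaces spaces.
    pad = ' ' * num_spaces
    return ''.join(pad + line for line in text.splitlines(keepends=True))

def pretty_dict(x, num_spaces=4):
    # Same shape as the pretty_repr inner function above, lifted out of the class.
    if not isinstance(x, dict):
        return repr(x)
    rep = ''.join(f'{key}: {pretty_dict(val, num_spaces)},\n' for key, val in x.items())
    return '{\n' + _indent(rep, num_spaces) + '}' if rep else '{}'

print(pretty_dict({'params': {'dense': {'kernel': '...'}}}))
# {
#     params: {
#         dense: {
#             kernel: '...',
#         },
#     },
# }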
Create a new FrozenDict with additional or replaced entries. | def copy(
self, add_or_replace: Mapping[K, V] = MappingProxyType({})
) -> 'FrozenDict[K, V]':
return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type] | [
"def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))",
"def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')",
"def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}",
"def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x",
"def _freeze_dict_save(self, new_dict):\n\n # Save the dictionary to disk.\n state_file = os.path.join(self._statedir, \"frozen_dict\")\n tmp_file = os.path.join(self._statedir, \"frozen_dict.new\")\n\n try:\n with open(tmp_file, \"w\") as tf:\n json.dump(\n (self.__FROZEN_DICT_VERSION, new_dict), tf)\n portable.rename(tmp_file, state_file)\n except EnvironmentError as e:\n raise apx._convert_error(e)\n self.__rebuild_image_catalogs()",
"def mutable_variables(self) -> Union[VariableDict, Dict[str, Any]]:\n self._populate_collections()\n xs = {\n k: v for k, v in self._variables.items() if in_filter(self.mutable, k)\n }\n if config.flax_return_frozendict:\n return freeze(xs)\n return xs",
"def new_dict(key, value, n_keys=0):\n # With JIT disabled, ignore all arguments and return a Python dict.\n return dict()",
"def copy(self) -> \"NestedDict[T]\":\n return NestedDict(self.items())",
"def _new_empty_basic_map(self):\n return OrderedDict()",
"def clone(self):\n return dict_util.clone(self)",
"def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}",
"def __freeze_dict_load(self):\n\n state_file = os.path.join(self._statedir, \"frozen_dict\")\n if os.path.isfile(state_file):\n try:\n version, d = json.load(file(state_file))\n except EnvironmentError as e:\n raise apx._convert_error(e)\n except ValueError as e:\n raise apx.InvalidFreezeFile(state_file)\n if version != self.__FROZEN_DICT_VERSION:\n raise apx.UnknownFreezeFileVersion(\n version, self.__FROZEN_DICT_VERSION,\n state_file)\n return d\n return {}",
"def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))",
"def impl_new_dict(key, value, n_keys=0):\n if any([\n not isinstance(key, Type),\n not isinstance(value, Type),\n ]):\n raise TypeError(\"expecting *key* and *value* to be a numba Type\")\n\n keyty, valty = key, value\n\n def imp(key, value, n_keys=0):\n if n_keys < 0:\n raise RuntimeError(\"expecting *n_keys* to be >= 0\")\n dp = _dict_new_sized(n_keys, keyty, valty)\n _dict_set_method_table(dp, keyty, valty)\n d = _make_dict(keyty, valty, dp)\n return d\n\n return imp",
"def copy(self):\n return AttrDict(dict(self).copy())",
"def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)",
"def copy(self):\n return type(self)(**{k: v.copy() if isinstance(v, AttrDict) else v for k, v in self.items()})",
"def freeze(d):\n if isinstance(d, dict):\n # make dictionaries lowercase for comparison\n d = lowercase_dict(d)\n return frozenset((key, freeze(value)) for key, value in d.items())\n elif isinstance(d, list):\n return tuple(freeze(value) for value in d)\n return d",
"def __init__(self, items=None, cutoff=.6):\n super(FuzzyDict, self).__init__()\n\n if items:\n self.update(items)\n self.cutoff = cutoff\n\n # short wrapper around some super (dict) methods\n self._dict_contains = lambda key: \\\n super(FuzzyDict, self).__contains__(key)\n\n self._dict_getitem = lambda key: \\\n super(FuzzyDict, self).__getitem__(key)"
] | { "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } } |
Deep copy unfrozen dicts to make the dictionary FrozenDict safe. | def _prepare_freeze(xs: Any) -> Any:
if isinstance(xs, FrozenDict):
# we can safely ref share the internal state of a FrozenDict
# because it is immutable.
return xs._dict # pylint: disable=protected-access
if not isinstance(xs, dict):
# return a leaf as is.
return xs
# recursively copy dictionary to avoid ref sharing
return {key: _prepare_freeze(val) for key, val in xs.items()} | [
"def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x",
"def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')",
"def freeze(d):\n if isinstance(d, dict):\n # make dictionaries lowercase for comparison\n d = lowercase_dict(d)\n return frozenset((key, freeze(value)) for key, value in d.items())\n elif isinstance(d, list):\n return tuple(freeze(value) for value in d)\n return d",
"def copy(self):\n return type(self)(**{k: v.copy() if isinstance(v, AttrDict) else v for k, v in self.items()})",
"def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out",
"def dict2frozenset(d):\n return frozenset(d.items())",
"def copy(self) -> \"NestedDict[T]\":\n return NestedDict(self.items())",
"def __drop_depth_0_keys(target_dict):\n ret = [\n copy.deepcopy(target_dict[key])\n for key in target_dict.keys()\n if hasattr(target_dict[key], \"keys\")]\n new_dict = Dict()\n for tmp in ret:\n new_dict += tmp\n return new_dict",
"def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))",
"def clone(self):\n return dict_util.clone(self)",
"def copy_deep2(self) -> \"SuperDict\":\n return json.loads(json.dumps(self))",
"def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]",
"def safe_deepcopy_env(obj):\n cls = obj.__class__\n result = cls.__new__(cls)\n memo = {id(obj): result}\n for k, v in obj.__dict__.items():\n if k not in ['viewer']:\n setattr(result, k, copy.deepcopy(v, memo=memo))\n else:\n setattr(result, k, None)\n return result",
"def copy(x):\n if isinstance(x, dict):\n d = AttrDict()\n for k, v in x.iteritems():\n d[copy(k)] = copy(v)\n return d\n elif isinstance(x, list):\n return map(copy, x)\n return x",
"def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value",
"def dictcopy(dicttocopy):\n new_dict = dict()\n for key, value in dicttocopy.items():\n new_dict[key] = set(value)\n\n return new_dict",
"def copyx(x):\n if isinstance(x, AttrDict):\n d = {}\n for k, v in x.iteritems():\n d[copyx(k)] = copyx(v)\n return d\n elif isinstance(x, list):\n return map(copy, x)\n return x",
"def dict_copies(my_dict, num_copies):\r\n return [dict(my_dict) for num in range(num_copies)] # add dict(my_dict) to make copies of my_dict to avoid referencing problems\r",
"def mutable_variables(self) -> Union[VariableDict, Dict[str, Any]]:\n self._populate_collections()\n xs = {\n k: v for k, v in self._variables.items() if in_filter(self.mutable, k)\n }\n if config.flax_return_frozendict:\n return freeze(xs)\n return xs"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` by transforming it into a (nested) dict. | def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:
if isinstance(x, FrozenDict):
# deep copy internal state of a FrozenDict
# the dict branch would also work here but
# it is much less performant because jax.tree_util.tree_map
# uses an optimized C implementation.
return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore
elif isinstance(x, dict):
ys = {}
for key, value in x.items():
ys[key] = unfreeze(value)
return ys
else:
return x | [
"def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}",
"def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')",
"def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')",
"def dict2frozenset(d):\n return frozenset(d.items())",
"def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value",
"def test_unfreeze_env(self):\n frozen = {'_PROJECTENV_foo': '$foo', 'foo': 'boo', 'bing': 'bang'}\n result = commands.unfreeze_env(frozen)\n self.assertEqual(result, {'foo': '$_PROJECTENV_foo', 'bing': None,\n '_PROJECTENV_foo': None})",
"def freeze(d):\n if isinstance(d, dict):\n # make dictionaries lowercase for comparison\n d = lowercase_dict(d)\n return frozenset((key, freeze(value)) for key, value in d.items())\n elif isinstance(d, list):\n return tuple(freeze(value) for value in d)\n return d",
"def fl_unfreeze_form(ptr_flform):\n _fl_unfreeze_form = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_form\", \\\n None, [cty.POINTER(xfdata.FL_FORM)], \\\n \"\"\"void fl_unfreeze_form(FL_FORM * form) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flform)\n _fl_unfreeze_form(ptr_flform)",
"def test_v1alpha3_unfreeze(self):\n pass",
"def unflatten(flat_dict):\n # Actually, nest the dictionary. If the dictionary has numbers as the most\n # outside values, like {'0': 1, '1': 2, '2': 3, '3': 4}, it causes an\n # unexpected error.\n fix_dict = dict([('a ' + key, value) for key, value in flat_dict.items()])\n\n return _unflatten(fix_dict, separator = ' ')['a']",
"def unfreeze(self) -> None:\n raise NotImplementedError",
"def restore_dict(data):\n return dict((k, restore_value(v)) for k, v in iteritems(data))",
"def test_v1_unfreeze(self):\n pass",
"def _unparse_dict(d, strategies=None):\n\n def _unparse_val(val):\n for instance_type, func in strategies:\n if isinstance(val, instance_type):\n return func(val)\n else:\n return val\n\n strategies = strategies or []\n out = dict()\n for k, v in d.items():\n if isinstance(v, dict):\n v = _unparse_dict(v, strategies=strategies)\n elif isinstance(v, list):\n v = [_unparse_val(val) for val in v]\n elif isinstance(v, tuple):\n v = tuple(_unparse_val(val) for val in v)\n else:\n v = _unparse_val(v)\n out[k] = v\n return out",
"def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))",
"def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}",
"def unfreeze(self):\n if self._frozen:\n self._frozen = False\n if self in _graphicsManager._frontHierarchy:\n _graphicsManager.beginRefresh()\n _graphicsManager.addCommandToQueue(('unfreeze', self))\n _graphicsManager.completeRefresh()",
"def fl_unfreeze_all_forms():\n _fl_unfreeze_all_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_all_forms\", \\\n None, [], \\\n \"\"\"void fl_unfreeze_all_forms() \"\"\")\n library.check_if_flinitialized()\n _fl_unfreeze_all_forms()",
"def immutable_dict_2_dict(imm_dict):\n m_dict = {}\n\n for key, val in imm_dict.to_dict(flat=False).items():\n if len(val) > 1 or key.endswith('[]'):\n m_dict[key.strip('[]')] = val\n else:\n m_dict[key] = val[0]\n\n return m_dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
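A minimal usage sketch for the `unfreeze` helper above. It assumes the flax package is installed and that `FrozenDict`, `freeze`, and `unfreeze` are importable from `flax.core.frozen_dict` (the import path is assumed from recent flax releases):

from flax.core.frozen_dict import FrozenDict, freeze, unfreeze  # assumed import path

params = freeze({'layer': {'kernel': [1.0, 2.0], 'bias': 0.0}})  # FrozenDict
mutable = unfreeze(params)               # plain nested dict, safe to mutate
mutable['layer']['bias'] = 1.0
assert isinstance(mutable, dict) and not isinstance(mutable, FrozenDict)
assert params['layer']['bias'] == 0.0    # the original FrozenDict is untouched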
Create a new dict with additional and/or replaced entries. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.copy`. | def copy(
x: Union[FrozenDict, Dict[str, Any]],
add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(
{}
),
) -> Union[FrozenDict, Dict[str, Any]]:
if isinstance(x, FrozenDict):
return x.copy(add_or_replace)
elif isinstance(x, dict):
new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x
new_dict.update(add_or_replace)
return new_dict
raise TypeError(f'Expected FrozenDict or dict, got {type(x)}') | [
"def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]",
"def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}",
"def extend_dict(source_dict, diff=None, deep=False):\n if deep:\n new_dict = deepcopy(source_dict)\n else:\n new_dict = copy(source_dict)\n\n if diff:\n new_dict.update(diff)\n return new_dict",
"def updated_with(orig_dict, *new_values):\n newdict = dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict",
"def dict_merge(base, upd, inplace=False):\n assert quacks_like_dict(base), quacks_like_dict(upd)\n dst = base if inplace else deepcopy(base)\n\n stack = [(dst, upd)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst",
"def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result",
"def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop",
"def extend(primary: Mapping, *others: Mapping, in_place=False):\n others = flatten(others)\n if not in_place:\n primary = dict(primary or {})\n for other in others:\n if other is None:\n continue\n for key, value in other.items():\n primary[key] = value\n return primary",
"def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))",
"def test_patch_dictionary():\n orig_dict = {\n \"email\": \"user@inveniosoftware.org\",\n \"username\": \"user\",\n }\n\n # patch some existing properties, add new ones, and leave some as is\n patch_dict = {\n \"email\": \"admin@inveniosoftware.org\",\n \"profile\": {\n \"full_name\": \"Test User\",\n },\n \"extra\": [1, 2, 3],\n }\n\n expected = {\n \"email\": \"admin@inveniosoftware.org\",\n \"username\": \"user\",\n \"profile\": {\n \"full_name\": \"Test User\",\n },\n \"extra\": [1, 2, 3],\n }\n\n patch_dictionary(orig_dict, patch_dict)\n assert orig_dict == expected",
"def _UpdateDict( target, override ):\n\n for key, value in override.items():\n current_value = target.get( key )\n if not _IsDict( current_value ):\n target[ key ] = value\n elif _IsDict( value ):\n target[ key ] = _UpdateDict( current_value, value )\n else:\n target[ key ] = value\n\n return target",
"def overwrite_dict(dict_base, dict_new, base_path=None):\n assert isinstance(dict_new, dict)\n for k in dict_new:\n # Add the current key to the path\n k_path = str(k) if base_path is None else f'{base_path}.{str(k)}'\n # Make sure that the key in the new dictionary matches one from the base dictionary\n assert k in dict_base, f'Could not find path {k_path} in the base dictionary'\n # Check that the types match between the base dictionary entry and the new one\n if dict_base[k] is not None:\n assert isinstance(type(dict_base[k]), type(dict_new[k])), \\\n 'The types at {} in the base dictionary do not match (expected {}, got {})'.format(\n k_path, str(type(dict_base[k])), str(type(dict_new[k])))\n # Recursively replace dictionary entries\n if isinstance(dict_base[k], dict):\n overwrite_dict(dict_base[k], dict_new[k], k_path)\n else:\n # Simply copy over leaf entries\n dict_base[k] = dict_new[k]",
"def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out",
"def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp",
"def mergedict(x, y):\n z = x.copy()\n z.update(y)\n return z",
"def copy_dict(value, impl=dict):\n if isinstance(value, tuple):\n return (copy_dict(e) for e in value)\n if isinstance(value, list):\n return [copy_dict(e) for e in value]\n if isinstance(value, dict):\n new_value = impl()\n for k, v in value.items():\n new_value[k] = copy_dict(v, impl=impl)\n return new_value\n return value",
"def _update(self, dict) -> \"SuperDict\":\n temp_dict = SuperDict.from_dict(self)\n temp_dict.update(dict)\n return temp_dict",
"def filter_dict_with_renamed_keys(orig_dict, key_rename_dict, *, optional=False):\n return {\n new_key: orig_dict[key]\n for key, new_key in key_rename_dict.items()\n if not optional or key in orig_dict\n }",
"def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
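A usage sketch for the `copy` helper above, under the same assumption that a recent flax exposes it as `flax.core.frozen_dict.copy`; it acts on either a `FrozenDict` or a plain dict and leaves its input unchanged:

from flax.core.frozen_dict import FrozenDict, copy  # assumed import path

frozen = FrozenDict({'a': 1})
assert dict(copy(frozen, {'b': 2})) == {'a': 1, 'b': 2}   # returns a new FrozenDict

plain = {'a': 1}
assert copy(plain, {'b': 2}) == {'a': 1, 'b': 2}          # returns a new dict
assert plain == {'a': 1}                                  # input is not mutated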
Create a new dict where one entry is removed. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pop`. | def pop(
x: Union[FrozenDict, Dict[str, Any]], key: str
) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:
if isinstance(x, FrozenDict):
return x.pop(key)
elif isinstance(x, dict):
new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x
value = new_dict.pop(key)
return new_dict, value
raise TypeError(f'Expected FrozenDict or dict, got {type(x)}') | [
"def dictRemove(d, e):\n if e in d:\n d.pop(e)",
"def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x",
"def drop_item(d, value=None, key=None):\n d = deepcopy(d)\n return {k: v for (k, v) in d.items() if k != key and v != value}",
"def remove_dict_key(dict_in: dict, key: str):\n new_dict = dict_in.copy()\n new_dict.pop(key, None)\n return new_dict",
"def dict_without_key(dictionary, key):\n del dictionary[key]\n return dictionary",
"def remove_key(key):\n def remove(d):\n if not isinstance(d, dict):\n raise TypeError(\"remove_key must take in a dict\")\n return {k: v for (k,v) in d.items() if k != key}\n return remove",
"def test_pop_with_default(self):\n x = adict(a='x', b='y', c='z')\n val = x.pop('b', 'w')\n self.assertEqual(val, 'y')\n val = x.pop('d', 'w')\n self.assertEqual(val, 'w')",
"def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]",
"def pop_dict(dic):\n print(\"poping_dict\")\n print(dic)\n keys_to_pop = []\n for key in dic.keys():\n keys_to_pop.append(key)\n print(key)\n #del dic[key]\n print(keys_to_pop)\n for to_pop in keys_to_pop:\n #dic.pop(to_pop)\n del dic[to_pop]\n print (dic)\n print(dic)\n return dic",
"def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')",
"def dict_exclude(d, remove):\n\tassert type(remove) is list\n\tif isinstance(d, dict):\n\t\t#recursively call for nested dicts\n\t\treturn {key:dict_exclude(value, remove) for key,value in d.iteritems() if key not in remove}\n\treturn d",
"def prune_dict(d, predicate):\n\n keys = [k for k, v in d.items() if predicate(k, v)]\n for k in keys:\n del d[k]",
"def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}",
"def cut(d, k):\n\tif isinstance(d, dict):\n\t\tn = d.copy()\n\t\tif k in n:\n\t\t\tdel n[k]\n\t\treturn n\n\treturn [v for v in d if v != k]",
"def _remove_pk(obj: dict, pk_slot_name: str) -> dict:\n if pk_slot_name in obj:\n obj = copy(obj)\n del obj[pk_slot_name]\n return obj",
"def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]",
"def remove_dict(dictionary, context_name):",
"def popitem(d, last=True):\n if last:\n return d.popitem()\n else:\n first_key = next(iter(d.keys()))\n return first_key, d.pop(first_key)",
"def pop(key, d):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
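Similarly, a sketch for the `pop` helper above (same flax import assumption); it returns the remaining mapping together with the removed value:

from flax.core.frozen_dict import FrozenDict, pop  # assumed import path

rest, value = pop(FrozenDict({'a': 1, 'b': 2}), 'b')
assert value == 2 and dict(rest) == {'a': 1}   # rest is a new FrozenDict

rest, value = pop({'a': 1, 'b': 2}, 'b')       # plain dicts work too
assert value == 2 and rest == {'a': 1}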
Returns an indented representation of the nested dictionary. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pretty_repr`. If x is any other type, this function will return `repr(x)`. | def pretty_repr(x: Any, num_spaces: int = 4) -> str:
if isinstance(x, FrozenDict):
return x.pretty_repr()
else:
def pretty_dict(x):
if not isinstance(x, dict):
return repr(x)
rep = ''
for key, val in x.items():
rep += f'{key}: {pretty_dict(val)},\n'
if rep:
return '{\n' + _indent(rep, num_spaces) + '}'
else:
return '{}'
return pretty_dict(x) | [
"def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return f'FrozenDict({pretty_dict(self._dict)})'",
"def repr_dict(dct, indent):\n # pprint represents OrderedDict objects using the tuple init syntax,\n # which is not very readable. Therefore, dictionaries are iterated over.\n if dct is None:\n return 'None'\n if not isinstance(dct, Mapping):\n raise TypeError(\"Object must be a mapping, but is a {}\"\n .format(type(dct)))\n if isinstance(dct, OrderedDict):\n kind = 'ordered'\n ret = '%s {\\n' % kind # non standard syntax for the kind indicator\n for key in six.iterkeys(dct):\n value = dct[key]\n ret += _indent('{!r}: {!r},\\n'.format(key, value), 2)\n else: # dict\n kind = 'sorted'\n ret = '%s {\\n' % kind # non standard syntax for the kind indicator\n for key in sorted(six.iterkeys(dct)):\n value = dct[key]\n ret += _indent('{!r}: {!r},\\n'.format(key, value), 2)\n ret += '}'\n ret = repr_text(ret, indent=indent)\n return ret.lstrip(' ')",
"def pretty(d, indent=0):\r\n for key, value in d.items():\r\n depth = 0\r\n print('\\t' * indent + str(key)+':'),\r\n if isinstance(value, dict):\r\n if depth == 0:\r\n print(\" \")\r\n depth+=1\r\n pretty(value, indent+1)\r\n else:\r\n print(' ' + str(value))",
"def printdict(x):\r\n # colors\r\n r = \"\\033[1;31m\"\r\n b = \"\\033[1;34m\"\r\n g = \"\\033[1;32m\"\r\n n = \"\\033[0m\"\r\n if not hasattr(x, 'keys'):\r\n print(x)\r\n return\r\n print('keys:', r, [key for key in x.keys()], n)\r\n for k in x.keys():\r\n print(\"['{}{}{}']:\".format(r, k, n))\r\n if not hasattr(x[k], 'keys'):\r\n print(x[k])\r\n continue\r\n print(' keys:', g, [key for key in x[k].keys()], n)\r\n for k2 in x[k].keys():\r\n print(\" ['{}{}{}']['{}{}{}']:\".format(r, k, n, g, k2, n))\r\n if hasattr(x[k][k2], 'keys'):\r\n print(' keys:', b, [key for key in x[k][k2].keys()], n)\r\n for k3 in x[k][k2].keys():\r\n print(\" ['{}{}{}']['{}{}{}']['{}{}{}']:\".format(r, k, n, g, k2,\r\n n, b, k3, n))\r\n if isinstance(x[k][k2][k3], np.ndarray):\r\n print(' shape:', x[k][k2][k3].shape)\r\n else:\r\n print(' ', sep='', end='')\r\n pprint.pprint(x[k][k2][k3], indent=2)\r\n else:\r\n print(' shape:', x[k][k2].shape)",
"def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))",
"def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output",
"def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])",
"def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)",
"def show_nest(p:PropertyDict, nest_level=0, indent_len:Optional[int]=None, pd_header=\"<PropertyDict>\"):\n _max_len = max(map(len, p.keys()))\n max_len = _max_len + nest_level * (_max_len if indent_len is None else indent_len)\n delimiter = \"\\n \"\n def showitem(v):\n if isinstance(v, Tensor):\n return tensor_utils.show(v)\n elif isinstance(v, dict):\n return \"dict({})\".format(\", \".join([\"{}={}\".format(k, showitem(v)) for k, v in v.items()]))\n else:\n return repr(v)\n\n unnested = dict(filter(lambda kv: not isinstance(kv[1], PropertyDict), p.items()))\n unnested_str = delimiter.join([\n *[(\"{:>\"+ str(max_len)+ \"}: {}\").format(k, showitem(v)) for k, v in unnested.items()\n ]\n ])\n\n nested = dict(filter(lambda kv: isinstance(kv[1], PropertyDict), p.items()))\n nested_str = delimiter.join([\n *[(\"{:>\"+ str(max_len)+ \"}: {}\").format(k + pd_header, \"\\n\"+show_nest(v, nest_level=nest_level+1)) for k, v in nested.items()\n ]\n ])\n\n return unnested_str + delimiter + nested_str",
"def pretty_print(dictionary: dict):\n return json.dumps(dictionary, indent=4)",
"def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)",
"def print_dict(d, wrap=0):\n pt = PrettyTable(['Property', 'Value'],\n caching=False, print_empty=False)\n pt.aligns = ['l', 'l']\n for (prop, value) in six.iteritems(d):\n if value is None:\n value = ''\n value = _word_wrap(value, max_length=wrap)\n pt.add_row([prop, value])\n # encoded = encodeutils.safe_encode(pt.get_string(sortby='Property'))\n # if six.PY3:\n # encoded = encoded.decode()\n # print(encoded)\n # print(pt)\n return pt",
"def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))",
"def dictPrintFormat(d, key_name = 'Key', value_name = 'Value'):\r\n \r\n max_len_key = len(key_name)\r\n max_len_val = len(value_name)\r\n \r\n for k, v in d.items():\r\n if len(str(k)) > max_len_key:\r\n max_len_key = len(str(k))\r\n if len(str(v)) > max_len_val:\r\n max_len_val = len(str(v))\r\n \r\n header = \"{key:<{key_size}} {val:<{val_size}}\".format(\r\n key = key_name,\r\n val = value_name,\r\n key_size = max_len_key,\r\n val_size = max_len_val)\r\n \r\n separator = \"-\"*(max_len_key + max_len_val + 2) \r\n \r\n values = \"\\n\".join([\"{key:<{key_size}} {val:<{val_size}}\".format(\r\n key = k,\r\n val = v,\r\n key_size = max_len_key,\r\n val_size = max_len_val) for k, v in d.items()])\r\n \r\n printout = \"\\n\".join([header, separator, values])\r\n \r\n return printout",
"def nestedDictPrinter(dictName):\n for dictName, key in dictName.items():\n \"\"\"loops through the keys in the nested dictionary, dictName, and then\n prints the nested dictionary names along with its values.\n \"\"\"\n print(dictName.title() + \":\") # prints nested dictionary name\n for items in key:\n \"\"\"loops through nested dictionary key values and and prints the\n key along with the value with a tab before.\n \"\"\"\n print(f\"\\t{items.title()} - {key[items]}\")",
"def deep_hash_to_text(d,**kwargs):\n\n\tspacer = kwargs.get('spacer',' ')\n\tlines = kwargs.get('lines',[])\n\tindent = kwargs.get('indent',0)\n\tfor key,value in d.iteritems():\n\t\tlines.append(spacer*indent+str(key))\n\t\tif isinstance(value,dict): deep_hash_to_text(value,indent=indent+1,lines=lines,spacer=spacer)\n\t\telse: lines.append(spacer*(indent+1)+str(value))\n\treturn '\\n'.join(lines)",
"def _str_indented(self, depth: int = 0) -> str:\r\n if self.is_empty():\r\n return ''\r\n else:\r\n s = ' ' * depth + f'{self.value} ({self.weight})\\n'\r\n for subtree in self.subtrees:\r\n s += subtree._str_indented(depth + 1)\r\n return s",
"def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))",
"def test_recursive_repr(self):\n x = adict()\n x['y'] = adict()\n x['y']['x'] = x\n if six.PY3:\n output = \"{'y': {'x': **RECURSION**}}\"\n else:\n output = \"{u'y': {u'x': **RECURSION**}}\"\n self.assertEqual(repr(x), output)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
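And a sketch for `pretty_repr` above, again assuming it is exposed by `flax.core.frozen_dict`; for a plain nested dict it produces the indented form shown in the comment:

from flax.core.frozen_dict import pretty_repr  # assumed import path

print(pretty_repr({'params': {'kernel': 3}}))
# {
#     params: {
#         kernel: 3,
#     },
# }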
Load a subset of the COCO dataset. | def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_names=None,
class_map=None, return_coco=False, auto_download=False):
if auto_download is True:
self.auto_download(dataset_dir, subset, year)
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
if subset == "minival" or subset == "valminusminival":
subset = "val"
image_dir = "{}/{}{}".format(dataset_dir, subset, year)
# Select class_ids from class_names:
if class_names:
class_ids = sorted(coco.getCatIds(catNms=class_names))
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# All images or a subset?
if class_ids:
image_ids = []
for id in class_ids:
imgs = [] # list of images to add to image_ids
# Select at most COCO_IMAGES_PER_OBJECT and select only the images
# that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them:
for imgid in list(coco.getImgIds(catIds=[id])):
if len(imgs) >= COCO_IMAGES_PER_OBJECT:
break
if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid], catIds=class_ids, iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE:
imgs.append(imgid)
image_ids.extend(imgs)
#image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT])
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
#print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))))
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco | [
"def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,\n class_map=None, return_coco=False):\n\n coco = COCO(\"{}/annotations/instances_{}{}.json\".format(dataset_dir, subset, year))\n if subset == \"minival\" or subset == \"valminusminival\":\n subset = \"val\"\n image_dir = \"{}/{}{}\".format(dataset_dir, subset, year)\n\n # Load all classes or a subset?\n if not class_ids:\n # All classes\n class_ids = sorted(coco.getCatIds())\n\n # All images or a subset?\n if class_ids:\n image_ids = []\n for id in class_ids:\n image_ids.extend(list(coco.getImgIds(catIds=[id])))\n # Remove duplicates\n image_ids = list(set(image_ids))\n else:\n # All images\n image_ids = list(coco.imgs.keys())\n\n # Add classes\n for i in class_ids:\n self.add_class(\"coco\", i, coco.loadCats(i)[0][\"name\"])\n\n # Add images\n for i in image_ids:\n self.add_image(\n \"coco\", image_id=i,\n path=os.path.join(image_dir, coco.imgs[i]['file_name']),\n width=coco.imgs[i][\"width\"],\n height=coco.imgs[i][\"height\"],\n annotations=coco.loadAnns(coco.getAnnIds(\n imgIds=[i], catIds=class_ids, iscrowd=None)))\n if return_coco:\n return coco",
"def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,\n class_map=None, return_coco=False, auto_download=False):\n\n if auto_download is True:\n self.auto_download(dataset_dir, subset, year)\n\n coco = COCO(\"{}/annotations/instances_{}{}.json\".format(dataset_dir, subset, year))\n if subset == \"minival\" or subset == \"valminusminival\":\n subset = \"val\"\n image_dir = \"{}/{}{}\".format(dataset_dir, subset, year)\n\n # Load all classes or a subset?\n if not class_ids:\n # All classes\n class_ids = sorted(coco.getCatIds())\n\n # All images or a subset?\n if class_ids:\n image_ids = []\n for id in class_ids:\n image_ids.extend(list(coco.getImgIds(catIds=[id])))\n # Remove duplicates\n image_ids = list(set(image_ids))\n else:\n # All images\n image_ids = list(coco.imgs.keys())\n\n # Add classes\n for i in class_ids:\n self.add_class(\"coco\", i, coco.loadCats(i)[0][\"name\"])\n\n # Add images\n for i in image_ids:\n self.add_image(\n \"coco\", image_id=i,\n path=os.path.join(image_dir, coco.imgs[i]['file_name']),\n width=coco.imgs[i][\"width\"],\n height=coco.imgs[i][\"height\"],\n annotations=coco.loadAnns(coco.getAnnIds(\n imgIds=[i], catIds=class_ids, iscrowd=None)))\n if return_coco:\n return coco",
"def load_coco(self, dataset_dir, subset, class_ids=None, return_coco=False):\n\n if subset == \"training\":\n coco_file = COCO(\"{}/Extension_75_{}.json\".format(dataset_dir, subset))\n else:\n coco_file = COCO(\"{}/GT_{}_(new-split).json\".format(dataset_dir, subset))\n # coco_file = COCO(\"{}/GT_{}_(new-split).json\".format(dataset_dir, subset))\n\n # Load all classes or a subset?\n if not class_ids:\n # All classes\n class_ids = sorted(coco_file.getCatIds())\n\n # All images or a subset?\n if class_ids:\n image_ids = []\n for i in class_ids:\n image_ids.extend(list(coco_file.getImgIds(catIds=[i])))\n # Remove duplicates\n image_ids = list(set(image_ids))\n else:\n # All images\n image_ids = list(coco_file.imgs.keys())\n\n # Add classes\n for i in class_ids:\n self.add_class(\"PanorAMS\", i, coco_file.loadCats(i)[0][\"name\"])\n\n # Add images\n for i in image_ids:\n self.add_image(\n \"PanorAMS\", image_id=i,\n path=coco_file.imgs[i]['file_name'],\n width=coco_file.imgs[i][\"width\"],\n height=coco_file.imgs[i][\"height\"],\n annotations=coco_file.loadAnns(coco_file.getAnnIds(\n imgIds=[i], catIds=class_ids, iscrowd=None)))\n if return_coco:\n return coco_file",
"def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes",
"def load_cooc(t_start, t_end):\n cooc = [load_npz('../data/historical/cooc_{}.npz'.format(r)) for r in range(t_start, t_end+1, 10)]\n cooc = [x[get_noun_indices_t(t_start), :] for x in cooc]\n cooc = [x[:, get_adj_indices()] for x in cooc]\n cooc = np.array([x.tocoo().toarray() for x in cooc])\n assert cooc.ndim == 3\n if cooc.shape[0] == 1:\n cooc = cooc[0]\n return cooc",
"def __select_subset_data(self):\n\n path=self.filepath/\"Data\"\n datapreprocess_2_selectsubsetofdata.DataPreprocessing(path,self.duplicateimagesdatafile, self.subsetselectdatafileprefix )",
"def seq_subset_load(infile, subset_mode, subset_args):\n from analysis.sequence_ops import feat_collect, feature_coords, \\\n coord_chop, get_seq_subset_by_coords \n from analysis.seqfile_ops import load_multifasta, surefmt_load, \\\n write_fasta\n from analysis.text_manipulation import adaptive_list_load\n if subset_mode is 'flatfile':\n # in this case the sequence file MUST be multifasta\n try: subset = load_multifasta(infile)\n except: raise\n else:\n print \"set of\", len(subset), \"sequence segments\"\n subset_file = infile\n else:\n # load the query single sequence file (convert format if necessary)\n try: seq_record = surefmt_load(infile, 'fasta', 'generic_dna')\n except: raise\n else: print \"query sequence loaded from\", infile\n # load or generate coordinate pairs for target segments\n if subset_mode is 'coordinates':\n try:\n coords_file = subset_args['file']\n header = subset_args['header']\n columns = subset_args['columns']\n coords_list = adaptive_list_load(coords_file, header, columns)\n except: raise\n else: print len(coords_list), \"segments loaded from\", infile\n elif subset_mode is 'features':\n try:\n feat_mode = subset_args\n features = feat_collect(infile, feat_mode)\n coords_list = feature_coords(features)\n print coords_list\n except: raise\n else: print len(coords_list),\"features loaded from\", infile\n elif subset_mode is 'size':\n try:\n size = subset_args['size']\n chop_mode = subset_args['chop_mode']\n coords_list = coord_chop(len(seq_record.seq), size, chop_mode)\n except: raise\n else: print len(coords_list), \"segments generated to fit\", size\n else:\n print \"ERROR: A mode MUST be specified.\"\n coords_list = None\n # collect subset of sequence segments using resulting coords_list\n try: subset = get_seq_subset_by_coords(seq_record, coords_list)\n except: raise\n else: print \"subset of\", len(subset), \"sequence segments\"\n # save subset to multifasta file for later use or reference\n subset_file = seq_record.id+'_subset.fas'\n try: write_fasta(subset_file, subset)\n except: raise\n else: print \"subset written to fasta file\", subset_file\n return subset, subset_file",
"def generate_coco_dataset(args):\n\targs.data_root = Path(args.data_root)\n\targs.save_root = Path(args.save_root)\n\targs.save_root.mkdir()\n\n\tgenerate_coco_dataset_sub(args, 'train', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'train', 'B', args.cat2)\n\tgenerate_coco_dataset_sub(args, 'val', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'val', 'B', args.cat2)",
"def load_subcolumns_from_netcdf(self, file_name):\n my_file = xr.open_dataset(file_name)\n self.ds = xr.merge([self.ds, my_file])\n my_file.close()",
"def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))",
"def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")",
"def load_subset_data(data_path, subset_name, timesteps):\n\n selected_subset_paths = subset_paths(os.path.join(data_path, subset_name))\n selected_subset_arrays = subset_arrays(selected_subset_paths)\n\n load_selected_timesteps = lambda x: np.load(x)\n\n if timesteps is not None:\n selected_subset_timesteps = load_selected_timesteps(timesteps)\n else:\n selected_subset_timesteps = np.array(range(int(np.sum(selected_subset_arrays[\"seq_lens\"]))))\n\n return selected_subset_arrays, selected_subset_timesteps",
"def get_coco_dataset():\n ds = AttrDict()\n # classes = [\n # '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n # 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n # 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n # 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n # 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n # 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n # 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n # 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n # 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n # 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n # 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n # 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n # 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n # ]\n # classes = ['__background__', 'lane']\n #\n base_classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n classes = ['__background__',\n 'guard rail',\n # 'car',\n 'dashed',\n 'solid',\n 'solid solid',\n 'dashed dashed',\n 'dashed-solid',\n 'solid-dashed',\n 'yellow dashed',\n 'yellow solid',\n 'yellow solid solid',\n 'yellow dashed dashed',\n 'yellow dashed-solid',\n 'yellow solid-dashed',\n 'boundary',\n 'fork_line',\n 'fork_edge',\n 'arrow_s',\n 'arrow_r',\n 'arrow_l',\n 'arrow_lr',\n 'arrow_inclined_r',\n 'arrow_r_s',\n 'arrow_l_s',\n 'sidewalk',\n 'handrail'\n ]\n base_classes.extend(classes[1:])\n classes = base_classes\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds",
"def F_subset_S5PHCHO(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_OFFL_L2__HCHO___'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n # not sure about cloud fraction\n # the time_utc string is empty?! why are you doing this to the user!\n data_fields = ['/PRODUCT/SUPPORT_DATA/INPUT_DATA/cloud_fraction_crb',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])",
"def load_dataset_CIFAR10(batch_size=64, download=True, transform=None, ignore_label=False, num_workers=4):\n data_dir = os.path.join('.', 'data','CIFAR10')\n \n trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True,\n download=download,\n transform=transform)\n \n testset = torchvision.datasets.CIFAR10(root=data_dir, train=False,\n download=download,\n transform=transform)\n \n if ignore_label:\n trainset = IgnoreLabelDataset(trainset)\n testset = IgnoreLabelDataset(testset)\n \n trainloader, testloader = get_data_loader(batch_size, trainset, testset, num_workers=num_workers)\n \n return trainloader, testloader",
"def load_data ():\n \n data_set = datasets.load_breast_cancer()\n return data_set",
"def load_dataset():\n try:\n data_path = ROOT_PATH.joinpath('data', 'Complete_TAVG_Daily_LatLong1_1880.nc')\n ds = xarray.open_dataset(data_path)\n return ds\n except FileNotFoundError:\n raise",
"def load_huc_data(hucfilename,geosubset):\n \n # use the NC_Reader class to facilitate subseting from lat/lon data\n hucreader=NC_Reader(None,filelist=[hucfilename],\n geo_subset=geosubset,\n readvars=[\"data\"],ntimes=1,\n latvar=\"lat\",lonvar=\"lon\")\n # just read the first element from the reader\n hucdata=hucreader.next()[0]\n # close the reader now that we are done with it. \n hucreader.close()\n return hucdata",
"def test_cooccurrence_sample_loader_interface(self):\n torch.manual_seed(3141592)\n batch_size = 3\n sector_factor = 3\n num_batches = 10\n\n cooccurrence_path = os.path.join(\n h.CONSTANTS.TEST_DIR, 'test-sample-loader')\n sampler = h.loader.GPUSampleLoader(\n cooccurrence_path, batch_size=batch_size, verbose=False)\n\n # Figure out the number of batches we expect, given the total number\n # of cooccurrence counts in the data, and the chosen batch_size.\n Nxx_data, I, J, Nx, Nxt = h.cooccurrence.CooccurrenceSector.load_coo(\n cooccurrence_path, sector_factor, verbose=False)\n\n # Confirm the shape, number, and dtype of batches.\n num_batches_seen = 0\n for batch_num in range(num_batches):\n for batch_id, batch_data in sampler:\n num_batches_seen += 1\n self.assertEqual(batch_id.shape, (batch_size*2, 2))\n self.assertEqual(batch_data, None)\n self.assertEqual(batch_id.dtype, torch.LongTensor.dtype)\n self.assertEqual(num_batches_seen, num_batches)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates this store's current state with incoming data from the network. data should be a mapping containing 'metacontacts', 'order', and 'info' structures (see comment at top of file) | def update_data(self, data):
rebuild = False
# This method needs to substitute some defaultdicts for the normal
# dictionaries that come back from the server.
# Metacontact information
#if data['metacontacts']
mc_dict = data.get('metacontacts', {})
if not isinstance(mc_dict, dict):
log.critical('invalid metacontacts dictionary')
mc_dict = {}
# Contact information like SMS numbers and email addresses.
self.info = defaultdict(dict)
si = self.info
if 'info' in data:
for (k, v) in data['info'].iteritems():
if isinstance(k, str):
cmpk = k.decode('utf8')
else:
cmpk = k
if not isinstance(cmpk, unicode):
continue
if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot)
for prot in protocols.iterkeys())):
if any(v.values()):
si[k] = v
for c, v in si.iteritems():
for attr in ('email', 'sms'):
if attr in v:
self.contact_info_changed(c, attr, v[attr])
self.metacontacts = MetaContactManager(self, mc_dict)
if hasattr(self, 'new_sorter'):
on_thread('sorter').call(self.new_sorter.removeAllContacts)
rebuild = True
# Manual ordering of groups
try:
self.order = deepcopy(data['order'])
self.order['groups'] = list(oset(self.order['groups']))
contacts = self._filtered_contacts()
self.order['contacts'] = defaultdict(list)
self.order['contacts'].update(contacts)
except Exception:
log.critical('error receiving order')
self._init_order()
# note: loading tofrom data from the network is deprecated. this data
# now goes out to disk. see save/load_local_data
if 'tofrom' in data and isinstance(data['tofrom'], dict) and \
'im' in data['tofrom'] and 'email' in data['tofrom']:
self.dispatch.set_tofrom(deepcopy(data['tofrom']))
if rebuild:
self.rebuild()
self.update_order() | [
"def update(self, data):\n\n\t\tself.data = data\n\t\tself.last_update = time.time()",
"def update_data():\n pass",
"def set_observed_data(self, data):\n self.state_space.data = data",
"def update_data(self):\n pass",
"def _update_model(self, new_state, data):",
"def _update_data(self, data, update_original=False):\n self._data.update(dict((key, self._deserialize(key, value))\n for key, value in data.items()))\n\n if update_original:\n self._original_data = copy.deepcopy(self._data)",
"def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True",
"def update(self, data, classification):\n pass",
"def update_current_data(self, data):\n if self.current_data is not None:\n current_results = self.get_results()\n self._history.append((self.current_data, current_results))\n\n self.current_data = data",
"def mems(state, data):\n if state['event'] == 'pc-changed':\n state['mems'] = {}\n else:\n state['mems'][word(data[0])['value']] = value(data[1])",
"def update_state(self, data):\n aid = data['aid']\n logger.debug(\"Got update from accessory with aid: %d\", aid)\n accessory = self.accessories[aid]\n service_data = data['services']\n for service, char_data in service_data.items():\n service_obj = accessory.get_service(service)\n for char, value in char_data.items():\n char_obj = service_obj.get_characteristic(char)\n with self.update_lock:\n char_obj.set_value(value)",
"def update_state(self, data):\n connection = self.create_connection()\n cursor = connection.cursor()\n try:\n sql_str = \"\"\"UPDATE servers SET state=:state WHERE name=:name\"\"\"\n cursor.execute(sql_str, data)\n connection.commit()\n except sqlite3.DatabaseError as error:\n self.show_db_error(error)",
"def forward_data(self, new_state: XyzState, data: Any) -> None:\n self.send_data(new_state, data)\n\n return None",
"def data_in(self, data, **kwargs):\n action_type = data.get(\"t\", \"UNKNOWN\")\n\n if action_type == \"MESSAGE_CREATE\":\n # someone posted a message on Discord that the bot can see\n data = data[\"d\"]\n if data[\"author\"][\"id\"] == self.discord_id:\n # it's by the bot itself! disregard\n return\n message = data[\"content\"]\n channel_id = data[\"channel_id\"]\n keywords = {\"channel_id\": channel_id}\n if \"guild_id\" in data:\n # message received to a Discord channel\n keywords[\"type\"] = \"channel\"\n author = data[\"member\"][\"nick\"] or data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n keywords[\"guild_id\"] = data[\"guild_id\"]\n\n else:\n # message sent directly to the bot account via DM\n keywords[\"type\"] = \"direct\"\n author = data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n\n # pass the processed data to the server\n self.sessionhandler.data_in(self, bot_data_in=(message, keywords))\n\n elif action_type in (\"GUILD_CREATE\", \"GUILD_UPDATE\"):\n # we received the current status of a guild the bot is on; process relevant info\n data = data[\"d\"]\n keywords = {\"type\": \"guild\", \"guild_id\": data[\"id\"], \"guild_name\": data[\"name\"]}\n keywords[\"channels\"] = {\n chan[\"id\"]: {\"name\": chan[\"name\"], \"guild\": data[\"name\"]}\n for chan in data[\"channels\"]\n if chan[\"type\"] == 0\n }\n # send the possibly-updated guild and channel data to the server\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n\n elif \"DELETE\" in action_type:\n # deletes should possibly be handled separately to check for channel removal\n # for now, just ignore\n pass\n\n else:\n # send the data for any other action types on to the bot as-is for optional server-side handling\n keywords = {\"type\": action_type}\n keywords.update(data[\"d\"])\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))",
"async def update_arrived(self, data) -> None:\n\n update = b''.join(self.chunks)\n _data = pickle.loads(update)\n self.chunks = []\n\n if self.server_update is None:\n self.server_update = _data\n elif isinstance(self.server_update, list):\n self.server_update.append(_data)\n else:\n self.server_update = [self.server_update]\n self.server_update.append(_data)",
"async def do_update(self, id, data):\n\n old = await self.middleware.call(\n 'datastore.query',\n self._config.datastore,\n [('identifier', '=', id)],\n {'prefix': self._config.datastore_prefix, 'get': True}\n )\n old.pop('enabled', None)\n self._expand_enclosure(old)\n new = old.copy()\n new.update(data)\n\n if old['passwd'] != new['passwd'] and new['passwd']:\n new['passwd'] = await self.middleware.call(\n 'notifier.pwenc_encrypt',\n new['passwd']\n )\n\n for key in ['acousticlevel', 'advpowermgmt', 'hddstandby']:\n new[key] = new[key].title()\n\n self._compress_enclosure(new)\n\n await self.middleware.call(\n 'datastore.update',\n self._config.datastore,\n id,\n new,\n {'prefix': self._config.datastore_prefix}\n )\n\n if any(new[key] != old[key] for key in ['hddstandby', 'advpowermgmt', 'acousticlevel']):\n await self.middleware.call('disk.power_management', new['name'])\n\n if any(\n new[key] != old[key]\n for key in ['togglesmart', 'smartoptions', 'hddstandby', 'critical', 'difference', 'informational']\n ):\n\n if new['togglesmart']:\n await self.toggle_smart_on(new['name'])\n else:\n await self.toggle_smart_off(new['name'])\n\n await self.middleware.call('service.restart', 'collectd')\n await self._service_change('smartd', 'restart')\n\n updated_data = await self.query(\n [('identifier', '=', id)],\n {'get': True}\n )\n updated_data['id'] = id\n\n return updated_data",
"def set_data(self, data):\n self._model.set_data(data)\n self.__refresh()",
"def update_service_data(self, data, etag):\r\n self.service.name = data[\"service\"][\"name\"]\r\n self.service.etag = etag\r\n self.service.set_mirrors(data[\"service\"][\"filelocations\"])\r\n self.service.torrent = data[\"service\"].get(\"torrents\", \"\")\r\n self.service.save()",
"def store_data_ilm(self, data):\n with self.dataLock:\n data['date'] = convert_time(time.time())\n self.data['ILM'].update(data)\n # this needs to draw from the self.data['INSTRUMENT'] so that in case one of the keys did not show up,\n # since the command failed in the communication with the device, the last value is retained\n chan1 = 100 if self.data['ILM']['channel_1_level'] > 100 else self.data['ILM']['channel_1_level']\n chan2 = 100 if self.data['ILM']['channel_2_level'] > 100 else self.data['ILM']['channel_2_level']\n self.ILM_window.progressLevelHe.setValue(chan1)\n self.ILM_window.progressLevelN2.setValue(chan2)\n\n self.MainDock_HeLevel.setValue(chan1)\n self.MainDock_N2Level.setValue(chan2)\n\n # print(self.data['ILM']['channel_1_level'], self.data['ILM']['channel_2_level'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Error in label: only whitespace is allowed, no tabs; if the checked label differs, raise an error. | def CheckLabel(Line):
for i in Line:
if i == '\t': # only the leading whitespace is checked; the loop stops at the first non-space character
raise InputError(Line, "malformed input")
elif i != ' ':
break | [
"def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)",
"def test_ignoring_label_to_string(self):\n code = \"\"\"nop\n label:\n nop\"\"\"\n interpreter = Interpreter(code)\n interpreter.compile()\n self.assertFalse(\"label\" in str(interpreter))",
"def test_is_valid_label_value_invalid_input():\n # test length violations\n assert not is_valid_label_value(value=f\"{'v' * 64}\") # value too long\n # test first character violations (not alphanum)\n assert not is_valid_label_value(value=\"-\")\n assert not is_valid_label_value(value=\"-a\")\n assert not is_valid_label_value(value=\".b\")\n assert not is_valid_label_value(value=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_value(value=\"a-\")\n assert not is_valid_label_value(value=\"b.\")\n assert not is_valid_label_value(value=\"c \")\n assert not is_valid_label_value(value=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_value(value=\"a$$a\")\n assert not is_valid_label_value(value=\"b b\")",
"def test_empty_label_warning(self):\n\n with self.assertLogs(_LOGGER) as logs:\n st.checkbox(label=\"\")\n\n self.assertIn(\n \"`label` got an empty value. This is discouraged for accessibility reasons\",\n logs.records[0].msg,\n )",
"def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False",
"def test_is_valid_label_value_valid_input():\n # test valid label values\n assert is_valid_label_value(value=None)\n assert is_valid_label_value(value=\"\")\n assert is_valid_label_value(value=\"l0L\")\n assert is_valid_label_value(value=\"L-l\")\n assert is_valid_label_value(value=\"L.L\")\n assert is_valid_label_value(value=\"l_4\")\n assert is_valid_label_value(value=\"4-you\")\n assert is_valid_label_value(value=\"You.2\")",
"def check_label(self):\n if self.p.endswith(':'):\n self.p = self.p[:-1]\n self.new_symbol() # check new symbol\n return True\n return False",
"def test_whitespace(self):\n self.assertRaises(ParseException, self.flag.parseString, ' ')",
"def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)",
"def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())",
"def validateLabel(cls, label: str, labeling_version: int) -> bool:\r\n\r\n return len(label.split('.')) in [2, 3]",
"def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, None)\n _G_label_3\n \"\"\"))",
"def test_label_not_in_config(self):\n with self.assertRaisesRegex(\n ValueError, 'The config \\'Label\\' field should contain the positive'\n ' class label.'):\n self.ci.run_with_metadata(\n indexed_inputs=self.dataset.indexed_examples,\n model=self.model,\n dataset=self.dataset,\n )",
"def test_label(self):\n k, v = self.sym.parse_label('2 3 12 0 1 1 1 0 foobar')\n # values of 1 aren't used by parser\n self.assertEqual(k, 'label')\n self.assertEqual(v.type, 'label')\n self.assertEqual((v.x, v.y), (2, 3))\n self.assertEqual(v.text, 'foobar')\n self.assertEqual(v.align, 'left')\n self.assertEqual(v._rotation, 0)",
"def check_valid_class_label(s):\n if type(s)==str:\n if re.search(r'[\\0-\\x20]',s):\n raise BadClassLabel(s)\n elif type(s)==str:\n if re.search(r'[^\\x21-\\x7e]',s):\n raise BadClassLabel(s)\n else:\n raise BadClassLabel(s)",
"def validate_label(label):\n try:\n session = persistent_mgr.create_database_session()\n devices_info = persistent_mgr.get_all_devices(session)\n for device in devices_info:\n if device.label == label:\n error_message = _(\"The label '%s' that is specified is already being used by \"\n \"another device or virtual machine in the network.\") % (label)\n return 101, error_message\n return 0, \"\"\n finally:\n session.close()",
"def checkLabel_valid(self, instance):\n entered_label = str(instance.text.strip())\n address_list = kivy_helper_search.search_sql(folder=\"addressbook\")\n addr_labels = [labels[0] for labels in address_list]\n add_dict = dict(address_list)\n if self.address and entered_label in addr_labels \\\n and self.address != add_dict[entered_label]:\n self.ids.add_label.error = True\n self.ids.add_label.helper_text = 'label name already exists.'\n elif entered_label:\n self.ids.add_label.error = False\n else:\n self.ids.add_label.error = True\n self.ids.add_label.helper_text = 'This field is required'",
"def validate_label(self, label):\n if label != self.label:\n raise KeypointsSchemaError(\n \"Label '%s' does not match keypoints schema\" % label\n )",
"def test_issue_remove_label(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
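The CheckLabel document above raises an InputError that is never defined in this entry. A minimal, self-contained sketch is given below; the exception shape and the example labels are assumptions for illustration, not part of the dataset.

class InputError(Exception):
    """Raised when a line's leading whitespace contains a tab (assumed shape)."""
    def __init__(self, line, message):
        super().__init__(f"{message}: {line!r}")
        self.line = line
        self.message = message


def CheckLabel(Line):
    # Walk the leading whitespace; a tab before the first non-space character is an error.
    for i in Line:
        if i == '\t':
            raise InputError(Line, "malformed input")
        elif i != ' ':
            break


CheckLabel("  LABEL1 ACGT")        # passes: leading whitespace is spaces only
try:
    CheckLabel("\tLABEL1 ACGT")    # raises: leading tab
except InputError as err:
    print(err)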
Parse a given text file containing labels and sequences: load the file, tidy it, process each line in the file, and return the labels and sequences as list[tuple(string, string)]. | def ParseSeqFile(FilePath):
SeqFile = rSeqFile(FilePath)
TidyFile = TidyLines(SeqFile)
result = []
for line in TidyFile:
t = ProcessLine(line)
result.append(t)
return result | [
"def parse(self):\n\n with open(self.fasta_file) as file:\n content = file.readlines()\n\n sequences = []\n sequence_ids = []\n sequence = []\n for line in content:\n if line.startswith('>'):\n sequence_ids.append(line.strip())\n if len(sequence) != 0:\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n sequence = []\n elif line.startswith(\"A\") or line.startswith(\"T\") or \\\n line.startswith(\"C\") or line.startswith(\"G\"):\n sequence.append(line.strip())\n sequences.append(''.join(sequence))\n if len(''.join(sequence)) > MAX_SEQUENCE_LENGTH:\n print WARNING_SEQUENCE_LENTH_EXCEEDED\n\n if len(sequences) > MAX_SEQUENCES:\n print WARNING_MAX_SEQUENCES_EXCEEDED\n\n return sequences, sequence_ids",
"def _read_sequences(filename):\n line, header = \" \", None\n with open(filename) as handle:\n # Find header\n num_sequences = num_bases = 0\n while line:\n line = handle.readline()\n if line.strip():\n header = line\n num_sequences, num_bases = map(int, line.split())\n break\n\n names = [None for _ in xrange(num_sequences)]\n sequences = [[] for _ in xrange(num_sequences)]\n\n line_num = 0\n while line:\n line = handle.readline()\n line_strip = line.strip()\n if line_strip:\n # The first N sequences are expected to contain sample names\n index = line_num % num_sequences\n if line_num < num_sequences:\n name, line_strip = line_strip.split(None, 1)\n names[index] = name\n\n sequences[index].extend(line_strip.split())\n line_num += 1\n\n if len(sequences) != num_sequences:\n message = (\"Expected %i sequences, but found %i in PHYLIP file:\\n\"\n \" Filename = %r\") % (num_sequences,\n len(sequences),\n filename)\n raise NodeError(message)\n\n for (index, fragments) in enumerate(sequences):\n sequences[index] = \"\".join(fragments)\n if len(sequences[index]) != num_bases:\n message = (\"Expected %ibp sequences, found %ibp sequence for %r\\n\"\n \" Filename = %r\") % (num_bases,\n len(sequences[index]),\n names[index],\n filename)\n raise NodeError(message)\n\n return header, names, sequences",
"def readSequences(lines):\n seqs = []\n label = None\n seq_lines = []\n for line in lines:\n line = line.strip() # strip off white space\n if not line: # skip empty lines\n continue\n if line.startswith(';'): # ignore comment lines\n continue\n # check for start of next sequence:\n if line.startswith('>'): # label line\n # first, store the previous sequence if we had one:\n if seq_lines:\n seqs.append(Sequence(label, ''.join(seq_lines)))\n seq_lines = []\n # get the label (name) for the next sequence\n label = line[1:].strip()\n else:\n # collect all lines with sequence information for this sequence:\n seq_lines.append(line)\n # take care of the last sequence in the file\n seqs.append(Sequence(label, ''.join(seq_lines)))\n return seqs",
"def extract_labels(Dir_path, file_name):\n\n file_path = os.path.join(Dir_path,file_name+\".fasta\")\n file_in = open(file_path,'r')\n # test_ = file_in.readlines()\n file_iter = iter(file_in) # iterator\n labels = []\n while True:\n try:\n line = file_iter.next()\n if line[0] == \">\":\n line = line.replace('\\n','')\n line = line.split('_')\n label = int(line[2])\n labels.append(label)\n except StopIteration:\n break\n return(labels)",
"def parse_sequence_file(file_path):\n\n in_buff = open(file_path)\n in_content = in_buff.readlines()\n\n counter = 0\n seq_dict = {}\n\n for line in in_content:\n\n # remove tailing new line\n line = line.strip()\n\n if counter % 3 == 0:\n\n name = line\n\n elif counter % 3 == 1:\n\n sequence = line\n\n elif counter % 3 == 2:\n\n ss_pred = line\n\n if 'U' not in ss_pred:\n seq_dict[name] = (sequence, ss_pred)\n\n counter += 1\n\n return seq_dict",
"def ParseSeqFile(File):\r\n #input checks\r\n if os.path.isfile(File) != True:\r\n raise TypeError ('malformed input')\r\n \r\n returnlist = list()\r\n try:\r\n file = open(File,'r')\r\n except:\r\n return 'malformed input'\r\n \r\n #converting file to wished output\r\n for line in file:\r\n if line[0] != \">\" and len(line)!=1:\r\n raise ValueError ('malformed input')\r\n #first check if the line follows the wished format\r\n if line[0] == \">\":\r\n #creating a list and removeing unwanted > and \\n\r\n partslist = line.strip(\">\").strip(\"\\n\").split()\r\n #check partslist size if 1 malformed input\r\n if partslist == []:\r\n raise ValueError ('malformed input')\r\n #catching label and format to capital letters\r\n label = partslist[0].upper()\r\n #generating dna and format to captial letters\r\n dnaparts = partslist[1:]\r\n dna = ''.join(dnaparts).upper()\r\n returnlist.append((label, dna))\r\n else:\r\n raise ValueError ('malformed input')\r\n #check if DNA is acual DNA\r\n if is_dna(dna) != True:\r\n raise TypeError ('malformed input')\r\n \r\n \r\n #checking output if it is a empty list raise ValueError\r\n if returnlist == []:\r\n raise ValueError ('malformed input')\r\n return (returnlist)",
"def fasta_as_tuples(file):\n\n for defline, group in itertools.groupby(file, lambda x: x[0] == '>'):\n if defline:\n line = next(group)\n id = line[1:].split()[0]\n else:\n sequence = ''.join(line.strip() for line in group)\n yield (id, sequence)",
"def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs",
"def read_labels_scrs_format_in(options, parameters):\n input_file = open(parameters['InputFile'], 'r')\n text_line = input_file.readline().strip()\n # skip comments\n while text_line.startswith(\"//\") or text_line.startswith(\";\"):\n text_line = input_file.readline()\n # labels are in the first non-commented line\n labels = text_line.split() \n # binary reads are in the rest of the file\n # one read is in one file\n i = 1\n text_line=input_file.readline().strip()\n reads = [];\n while text_line!=\"\":\n # skip comments\n if text_line.startswith(\"//\") or text_line.startswith(\";\"):\n text_line = input_file.readline()\n continue\n bit_line = text_line.replace(\" \", \"\")\n bit_line = bit_line.replace(\"?\", \"0\")\n ba = BitArray(bin = bit_line)\n unknown_line = text_line.replace(\" \", \"\")\n unknown_line = unknown_line.replace(\"1\", \"0\")\n unknown_line = unknown_line.replace(\"?\", \"1\")\n ua = BitArray(bin = unknown_line)\n readElem = ReadElement(i, ba, ua)\n reads.append(readElem)\n text_line=input_file.readline().strip()\n i= i+1\n return(labels, reads)",
"def read_label_file(fn):\n utt = get_utt(fn)\n with open(fn, 'rb') as f:\n segs = []\n for n, line in enumerate(f):\n onset, offset, label = line.decode('utf-8').strip().split()\n segment_id = '{}_{}'.format(\n utt, str(n).zfill(4))\n segs.append(Segment(segment_id, onset, offset, label))\n return segs",
"def recover_original_data(data_path, sequence_pairs):\n # initialize variables\n num_labels = 0\n num_sequences = 0\n num_correct_labels = 0\n num_correct_sequences = 0\n with open(data_path, \"r\") as input_file:\n # sequence of workds in each sentence\n word_sequence = []\n # gold/original labels for each word in each sentence\n gold_label_sequence = []\n # prediction labels for each word in each sentence\n pred_label_sequence = []\n for line in input_file:\n # split line into tokens\n tokens = line.split()\n # check if line is not empty\n if tokens:\n # a label exists\n num_labels += 1\n # the word is the first token\n word = tokens[0]\n # the original label is the second token\n gold_label = tokens[1]\n # the prediction label is the third token\n pred_label = tokens[2]\n # check if prediction equals to real label\n if pred_label == gold_label:\n num_correct_labels += 1\n # build the sequence of words, labels, and predictions for each sentence\n word_sequence.append(word)\n gold_label_sequence.append(gold_label)\n pred_label_sequence.append(pred_label)\n # line is empty\n else:\n # count number of sequences (=sentences)\n num_sequences += 1\n # check if word_sequence is empty\n if word_sequence:\n sequence_pairs.append([word_sequence, gold_label_sequence])\n # check if we predicted correctly the whole sequence\n if pred_label_sequence == gold_label_sequence:\n num_correct_sequences += 1\n # flush lists for next sequence\n word_sequence = []\n gold_label_sequence = []\n pred_label_sequence = []\n # here is the case where the file does not end with an empty line\n # repeat the process for the last sequence of the file\n if word_sequence:\n num_sequences += 1\n sequence_pairs.append([word_sequence, gold_label_sequence])\n if pred_label_sequence == gold_label_sequence:\n num_correct_sequences += 1\n # calculate per instance (=word) accuracy and per sequence (=sentence) accuracy\n per_instance_accuracy = float(num_correct_labels) / num_labels * 100\n per_sequence_accuracy = float(num_correct_sequences) / num_sequences * 100\n return per_instance_accuracy, per_sequence_accuracy",
"def convert_bmes_to_sequence_tagging(source_file: str, output_file: str):\n # 1. read all lines and split it to sentences\n sentences: List[str] = []\n labels: List[str] = []\n with open(source_file, 'r+', encoding='utf-8') as f:\n\n # 1. 一个文件中的token和labels\n sentence_tokens, sentence_labels = [], []\n for line in f:\n line = line.strip()\n if not line:\n sentences.append(sentence_tokens)\n labels.append(sentence_labels)\n sentence_tokens, sentence_labels = [], []\n else:\n line_tokens, line_labels = read_line(line)\n\n sentence_tokens.extend(line_tokens)\n sentence_labels.extend(line_labels)\n\n assert len(sentences) == len(labels)\n \n # 2. write tokens and labels to the file\n with open(output_file, 'w+', encoding='utf-8') as f:\n\n for index in range(len(sentences)):\n tokens, sentence_labels = sentences[index], labels[index]\n\n items = [\n '###'.join([tokens[i], sentence_labels[i]]) for i in range(len(tokens))]\n\n f.write('\\t'.join(items) + '\\n')",
"def read_input_file(filename: str):\n sentences = []\n sentence = []\n label2idx = {'O': 0}\n label_idx = 1\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file:\n line = line.strip()\n if line == \"\":\n if len(sentence) > 0:\n sentences.append(sentence)\n sentence = []\n continue\n splits = line.split('\\t')\n word = splits[0]\n label = splits[1]\n sentence.append((word, label))\n if label not in label2idx.keys():\n label2idx[label] = label_idx\n label_idx += 1\n if len(sentence) > 0:\n sentences.append(sentence)\n return sentences, label2idx",
"def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels",
"def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data",
"def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label",
"def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))",
"def get_sequence_list(files):\n sequence_list = []\n for i in range(0,len(files)):\n with open(files[i], \"r\") as fasta_file:\n fasta_seq_all = fasta_file.read()\n \n\n fasta_seq_all = fasta_seq_all.split(\">\")\n\n for j in range(0, len(fasta_seq_all)):\n fasta_seq = fasta_seq_all[j]\n if len(fasta_seq) > 2:\n \n fasta_seq = fasta_seq.splitlines()\n label = _format_label(files[i], fasta_seq.pop(0))\n format_fasta_seq = []\n for k in range(0,len(fasta_seq)):\n try:\n if fasta_seq[k][0] == \"\\n\":\n break\n \n format_fasta_seq.append(fasta_seq[k])\n except:\n break\n format_fasta_seq = \"\".join(format_fasta_seq)\n format_fasta_seq.strip()\n if len(format_fasta_seq) > 2:\n sequence_list.append(Sequence(format_fasta_seq, label))\n \n return sequence_list",
"def load_FASTA(filename):\n infile = open(filename)\n full_entries = list(SeqIO.parse(infile, 'fasta'))\n sequences = [str(entry.seq) for entry in full_entries]\n names = [str(entry.id) for entry in full_entries]\n\n return sequences, names"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
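ParseSeqFile in the entry above depends on three helpers (rSeqFile, TidyLines, ProcessLine) that are not included. The sketch below is one plausible reading, assuming each tidied line holds a label followed by sequence fragments; the exact file format is an assumption.

def rSeqFile(FilePath):
    # Read the raw lines of the input file.
    with open(FilePath) as handle:
        return handle.readlines()


def TidyLines(lines):
    # Strip surrounding whitespace and drop blank lines.
    return [line.strip() for line in lines if line.strip()]


def ProcessLine(line):
    # Assume "LABEL FRAGMENT [FRAGMENT ...]" per line; join fragments into one sequence.
    parts = line.split()
    return (parts[0], "".join(parts[1:]))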
Return 'p1' if the current player is Player 1, and 'p2' if the current player is Player 2. | def get_current_player_name(self) -> str:
if self.p1_turn:
return 'p1'
return 'p2' | [
"def opponent(self):\n if self.player() == 'p1':\n return 'p2'\n else:\n return 'p1'",
"def other_player(player):\n if player == PLAYER_ONE:\n return PLAYER_TWO\n elif player == PLAYER_TWO:\n return PLAYER_ONE\n else:\n return None",
"def get_current_player_name(self):\n if self.is_p1_turn:\n return \"p1\"\n return \"p2\"",
"def get_current_player(player_one_turn: bool) -> str:\n if player_one_turn:\n return P1\n else:\n return P2\n # Complete this function.",
"def get_current_player_name(self) -> str:\n if self.is_p1_turn:\n return \"p1\"\n return \"p2\"",
"def get_current_player(player_one_turn: bool) -> str:\n\n # Complete this function.\n if player_one_turn is True: \n return P1\n \n return P2",
"def get_current_player(player_one_turn):\n\n if player_one_turn == True:\n return \"Player One\"\n else:\n return \"Player Two\"",
"def get_player(self, playername):\n if playername == self._player_1_name:\n return self._player_1\n if playername == self._player_2_name:\n return self._player_2",
"def get_current_player(player_one_turn):\n \n # Get appropriate player whether the parameter is True or False\n if player_one_turn == True:\n return 'Player One'\n return 'Player Two'",
"def otherPlayer(cls, player):\n return 0 if player == 1 else 1",
"def get_player(self, number):\n num = int(number)\n assert (num in [1, 2])\n return self.player_1 if num == 1 else self.player_2",
"def alternate_player(current_player):\r\n if (current_player == provided.PLAYERX):\r\n return provided.PLAYERO\r\n else:\r\n return provided.PLAYERX",
"def get_enemy_as_p1a_or_p2a(self):\n cleaned_data = self.initial_turn.replace(\"\\\"\", \"\").replace(\"\\\\\", \" \")\n index = cleaned_data.find('|player|')\n data = cleaned_data[index:]\n data = data.replace(\"|\", \" \").split()\n for i in range(0, len(data)):\n entry = data[i]\n if entry == \"player\":\n if data[i + 1] == \"p2\":\n if data[i + 2] == \"csc665\":\n return \"p1a:\"\n else:\n return \"p2a:\"",
"def determine_human_players(p1, p2):\n\n p1_human = False\n p2_human = False\n if isinstance(p1,Agents.Human):\n p1_human = True\n if isinstance(p2, Agents.Human):\n p2_human = True\n\n return p1_human, p2_human",
"def player_css_class(p1, p2, cp=None):\n return (\"self_player\" if p1 is p2 else \"other_player\") + (\n \" current_player\" if p1 is cp else \"\")",
"def player_or_bye(p):\n if p == 'BYE':\n return p\n return p.player",
"def next_player(current_player=\"None\"):\n if current_player == \"None\":\n return random.choice([\"Player 1\", \"Player 2\"])\n elif current_player == \"Player 1\":\n return \"Player 2\"\n else:\n return \"Player 1\"",
"def __negated_player(self, player):\n\t\treturn self.PLAYER2 if self.current_player == self.PLAYER1 else self.PLAYER1",
"def other_player(self):\r\n return self.get_others_in_group()[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return whether move is a valid move for this GameState. | def is_valid_move(self, move: Any) -> bool:
return move in self.get_possible_moves() | [
"def has_valid_move(self) -> bool:\r\n\t\tis_valid_move = False\r\n\t\tfor row in range(self._othello_game.get_rows()):\r\n\t\t\tif is_valid_move:\r\n\t\t\t\tbreak\r\n\r\n\t\t\tfor col in range(self._othello_game.get_cols()):\r\n\r\n\t\t\t\tif len(self._othello_game.placement_is_valid(row, col)) > 0: #if the move is valid\r\n\t\t\t\t\tis_valid_move = True\r\n\t\t\t\t\tbreak\r\n\r\n\t\treturn is_valid_move",
"def is_valid_move(self, move: Any) -> bool:\n raise NotImplementedError",
"def is_valid_move(self, move):\n\t\tpass",
"def _is_valid_move(state, move):\n (row, col) = move\n rows, cols = len(state), len(state[0])\n if (row >= rows or row < 0 or\n col >= cols or col < 0 or\n state[row][col] is not None):\n return False\n else:\n return True",
"def has_move(self) -> bool:\n\n return len(self.moves) != 0",
"def valid_bool(self):\n return bool(self.piece.validate_move(self.board, self))",
"def valid_move(self, player, move):\n return (True)",
"def __check_valid_move(self, row, column):\n # To make a valid move, the current_state cannot be a \"DRAW\", \"X_WON\", or \"O_WON\"\n if self.current_state == \"UNFINISHED\":\n # The board has three rows and three columns\n if 0 <= row <= 2 and 0 <= column <= 2:\n # The attempted space cannot already contain a move by \"x\" or \"o\"\n if self.board[row][column] == \"\":\n return True\n return False",
"def is_valid_move(self, move_to_make: int) -> bool:\n return move_to_make in self.valid_moves",
"def isValidMove(self, move: Move) -> bool:\n # TODO: How do we determine the move type?\n # Some form of duck typing?\n minigame_move_classes = {\n \"BuyPrivateCompany\": \"BuyPrivateCompanyMove\",\n \"BiddingForPrivateCompany\": \"BuyPrivateCompanyMove\",\n }\n return minigame_move_classes.get(self.minigame_class) == move.__class__.__name__",
"def legal_move(self, move, state = None):\n if state is None:\n state = copy(self.state)\n else:\n state = copy(state)\n return state[move // state.shape[0], move % state.shape[0]] == 0",
"def valid_move(self, player, move):\n if self.rounds < len(self.players):\n if ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or not (True in [(pt in player.corners) for pt in move])):\n return (False)\n else:\n return (True)\n\n elif ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or (self.board).adj(player, move)\n or not (self.board).corner(player, move)):\n return (False)\n\n else:\n return (True)",
"def move_valid(move):\n return True",
"def _valid_move(self, row, col):\n return row >= 0 and row < self.num_rows() \\\n and col >= 0 and col < self.num_cols() \\\n and self._maze_cells[row, col] is None",
"def is_valid_move(self, position: Point) -> bool:\n\t\tif self.tiles[position.x][position.y] == 0:\n\t\t\treturn True\n\t\treturn False",
"def validate_move(self, move_str) -> bool:\n move_str = move_str.lower().strip()\n return move_str in self._valid_moves",
"def is_valid_move(self, x, y, player):\n if self.__board[y][x] is BoardPiece.EMPTY:\n if player.piece == self.turn:\n return True\n return False",
"def has_valid_move(self, cur_square, board):\n coords = cur_square.coords\n neighbor_list = [tuple(map(sum, zip(coords, offset))) for offset in self._offsets]\n return self.has_valid_move_in_list(coords, neighbor_list, board)",
"def is_valid_move(move, board, row):\n\n # See if the current move choice can even go on the board\n if move & board[row] != 0:\n return False\n else:\n board_size = len(board)\n mvs = invalid_spots(move, row, board_size)\n for i in range(board_size):\n if mvs[i] & board[i] != 0:\n return False\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an estimate in interval [LOSE, WIN] of best outcome the current player can guarantee from state self. | def rough_outcome(self) -> float:
# heuristic for picking a move; not guaranteed optimal, but better than random
# return 1 if the current player can win immediately
# return -1 if every reachable state leads to a win for the other player
# return 0 otherwise (no immediate win or forced loss detected)
# only looks two states ahead
pass | [
"def rough_outcome(self) -> float:\n if is_win(self):\n return 1\n elif is_lose(self):\n return -1\n return 0",
"def rough_outcome_strategy(game: 'Game') -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n \n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n \n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n \n # Return the move that resulted in the best rough_outcome\n return best_move",
"def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n \n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n \n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n \n # Return the move that resulted in the best rough_outcome\n return best_move",
"def rough_outcome_strategy(game: Any) -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so we can replace this easily later\n\n # Get the move that results in the lowest rough_outcome for the opponent\n for move in current_state.get_possible_moves():\n new_state = current_state.make_move(move)\n\n # We multiply the below by -1 since a state that's bad for the opponent\n # is good for us.\n guessed_score = new_state.rough_outcome() * -1\n if guessed_score > best_outcome:\n best_outcome = guessed_score\n best_move = move\n\n # Return the move that resulted in the best rough_outcome\n return best_move",
"def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj",
"def winner(self):\n\n if self.home_score > self.away_score:\n return HOME\n elif self.home_score < self.away_score:\n return VISITOR\n else:\n return TIE",
"def winner(self, winnings):\n\n self.money += winnings\n if winnings > self.highest_win:\n self.highest_win = winnings\n\n return self.highest_win",
"def evaluateWinner(self):\n\t\tif self.pots[-1] == 0:\n\t\t\tself.pots.pop()\n\t\tlivePlayers = self.getLivePlayers()\t\n\t\tfor i in range(len(self.pots)):\n\t\t\tplayers = self.getPlayersInPot(i, livePlayers)\n\t\t\tevaluations = []\n\t\t\tfor x in players:\n\t\t\t\tcombined = x.hand + self.communityCards\n\t\t\t\tevaluations.append((x, self.evaluator.getRankOfSeven(\tcombined[0], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[1], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[2], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[3], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[4], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[5], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcombined[6] )))\n\t\t\twinners = self.getWinners(evaluations, i)\n\t\t\tself.handOutMoney(winners, i)\n\t\t\tself.potwinQ.append(winners[0].name)",
"def pickBestMove(self):\n aggressiveness = 3\n defensiveness = 5\n best_option = None\n best_option_rating = 0\n for option in self.nextBoards.values():\n option_rating_numerator = option.simRecordWeighted['wins_weighted'] * aggressiveness + option.simRecordWeighted['ties_weighted']\n option_rating_denominator = option.simRecordWeighted['loses_weighted'] * defensiveness\n option_rating = option_rating_numerator / option_rating_denominator\n if option_rating > best_option_rating:\n best_option = option\n best_option_rating = option_rating\n return best_option",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def predict_winner(self):\n\t\tif len(self.players) > 1:\n\t\t\t# TODO: convert to using of max() function\n\t\t\twinner = self.players[0]\n\t\t\tfor player in self.players:\n\t\t\t\tif player.wr > winner.wr:\n\t\t\t\t\twinner = player\n\t\t\treturn winner\n\t\telse:\n\t\t\treturn None",
"def Pwin(state):\n # Assumes opponent also plays with optimal strategy\n p, me, you, pending = state\n if me + pending >= goal:\n return 1\n elif you >= goal:\n return 0\n else:\n return max(Q_pig(state, action, Pwin) for action in pig_actions(state))",
"def win_amount(self):\n return self.amount_bet + self.outcome.win_amount(self.amount_bet)",
"def calc_worst_score(self):\n return min([self.temp_score, self.wind_score, self.precipitation_score])",
"def sure_bet_profit(self, other: AutoBetStats) -> float:\n return 1 - (1 / self.max_value + 1 / other.max_value)",
"def fitness(self):\n return (self.early_penalty if self.landing_time < self.target_time else self.late_penalty) *\\\n abs(self.landing_time - self.target_time)",
"def Pwin(state):\n # Assumes opponent also plays with optimal strategy.\n (p, me, you, pending) = state\n if me + pending >= goal:\n return 1\n elif you >= goal:\n return 0\n else:\n return max(Q_pig(state, action, Pwin)\n for action in pig_actions(state))",
"def get_winner(state):\n state_val = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n if state_val == 100:\n return state.action_player\n elif len(state.available_moves) == 0:\n return 0\n else:\n return -1",
"def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
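The rough_outcome document above is left as a commented stub. The two-ply sketch below follows those comments under the assumption (as in these game entries) that a player with no possible moves has lost; the WIN/LOSE/DRAW constants are placeholders, not the course's reference solution.

WIN, LOSE, DRAW = 1, -1, 0


def rough_outcome(state) -> float:
    # Win immediately if some move leaves the opponent with no moves at all
    # (assumed to mean the opponent has lost in this family of games).
    for move in state.get_possible_moves():
        if not state.make_move(move).get_possible_moves():
            return WIN
    # Lose if every move hands the opponent such an immediate win; an empty
    # move list also falls through to LOSE, since the current player is stuck.
    if all(any(not after.make_move(reply).get_possible_moves()
               for reply in after.get_possible_moves())
           for after in (state.make_move(m) for m in state.get_possible_moves())):
        return LOSE
    return DRAW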
Copy common fields from the layer into the addressing dictionary. | def set_address_values(layer):
cursor = arcpy.SearchCursor(layer)
for row in cursor:
layer_fields = arcpy.ListFields(layer)
for x in range(len(layer_fields)):
layer_fields[x] = layer_fields[x].name
for key in address_dict:
if key in layer_fields and address_dict.get(key) is None:
address_dict[key] = row.getValue(key) | [
"def _update_layer_fields(layer, field_mapping):\r\n\r\n if 'layerDefinition' in layer and layer['layerDefinition'] is not None:\r\n layer_definition = layer['layerDefinition']\r\n \r\n if 'definitionExpression' in layer_definition and layer_definition['definitionExpression'] is not None:\r\n layer_definition['definitionExpression'] = _find_and_replace_fields(layer_definition['definitionExpression'], field_mapping)\r\n \r\n if 'drawingInfo' in layer_definition and layer_definition['drawingInfo'] is not None:\r\n if 'renderer' in layer_definition['drawingInfo'] and layer_definition['drawingInfo']['renderer'] is not None:\r\n renderer = layer_definition['drawingInfo']['renderer']\r\n if renderer['type'] == 'uniqueValue':\r\n i = 0\r\n while 'field{0}'.format(i) in renderer:\r\n if renderer['field{0}'.format(i)] in field_mapping:\r\n renderer['field{0}'.format(i)] = field_mapping[renderer['field{0}'.format(i)]]\r\n i += 1\r\n elif renderer['type'] == 'classBreaks':\r\n if 'field' in renderer:\r\n if renderer['field'] in field_mapping:\r\n renderer['field'] = field_mapping[renderer['field']]\r\n\r\n value_expression = _deep_get(renderer, \"valueExpression\")\r\n if value_expression is not None:\r\n renderer['valueExpression'] = _find_and_replace_fields_arcade(str(value_expression), field_mapping)\r\n \r\n labeling_infos = _deep_get(layer_definition['drawingInfo'], 'labelingInfo') \r\n if labeling_infos is not None:\r\n for label_info in labeling_infos:\r\n label_expression = _deep_get(label_info, 'labelExpression')\r\n if label_expression is not None:\r\n results = re.findall(\"\\[(.*?)\\]\", label_expression)\r\n for result in results:\r\n if result in field_mapping: \r\n label_info['labelExpression'] = str(label_expression).replace(\"[{0}]\".format(result), \"[{0}]\".format(field_mapping[result]))\r\n \r\n value = _deep_get(label_info, 'labelExpressionInfo', 'value')\r\n if value is not None:\r\n results = re.findall(\"{(.*?)}\", value)\r\n for result in results:\r\n if result in field_mapping: \r\n label_info['labelExpressionInfo']['value'] = str(value).replace(\"{{{0}}}\".format(result), \"{{{0}}}\".format(field_mapping[result]))\r\n\r\n expression = _deep_get(label_info, 'labelExpressionInfo', 'expression')\r\n if expression is not None:\r\n label_info['labelExpressionInfo']['expression'] = _find_and_replace_fields_arcade(str(expression), field_mapping)\r\n \r\n if 'popupInfo' in layer and layer['popupInfo'] is not None:\r\n if 'title' in layer['popupInfo'] and layer['popupInfo']['title'] is not None:\r\n results = re.findall(\"{(.*?)}\", layer['popupInfo']['title'])\r\n for result in results:\r\n if result in field_mapping:\r\n layer['popupInfo']['title'] = str(layer['popupInfo']['title']).replace(\"{{{0}}}\".format(result), \"{{{0}}}\".format(field_mapping[result]))\r\n \r\n if 'description' in layer['popupInfo'] and layer['popupInfo']['description'] is not None:\r\n results = re.findall(\"{(.*?)}\", layer['popupInfo']['description'])\r\n for result in results:\r\n if result in field_mapping:\r\n layer['popupInfo']['description'] = str(layer['popupInfo']['description']).replace(\"{{{0}}}\".format(result), \"{{{0}}}\".format(field_mapping[result]))\r\n\r\n if 'fieldInfos' in layer['popupInfo'] and layer['popupInfo']['fieldInfos'] is not None:\r\n for field in layer['popupInfo']['fieldInfos']:\r\n if field['fieldName'] in field_mapping:\r\n field['fieldName'] = field_mapping[field['fieldName']]\r\n\r\n if 'expressionInfos' in layer['popupInfo'] and layer['popupInfo']['expressionInfos'] is 
not None:\r\n for expression_info in layer['popupInfo']['expressionInfos']:\r\n if 'expression' in expression_info and expression_info['expression'] is not None:\r\n expression_info['expression'] = _find_and_replace_fields_arcade(str(expression_info['expression']), field_mapping)\r\n\r\n if 'mediaInfos' in layer['popupInfo'] and layer['popupInfo']['mediaInfos'] is not None:\r\n for media_info in layer['popupInfo']['mediaInfos']:\r\n if 'title' in media_info and media_info['title'] is not None:\r\n results = re.findall(\"{(.*?)}\", media_info['title'])\r\n for result in results:\r\n if result in field_mapping:\r\n media_info['title'] = str(media_info['title']).replace(\"{{{0}}}\".format(result), \"{{{0}}}\".format(field_mapping[result]))\r\n if 'caption' in media_info and media_info['caption'] is not None:\r\n results = re.findall(\"{(.*?)}\", media_info['caption'])\r\n for result in results:\r\n if result in field_mapping:\r\n media_info['caption'] = str(media_info['caption']).replace(\"{{{0}}}\".format(result), \"{{{0}}}\".format(field_mapping[result]))\r\n if 'normalizeField' in media_info and media_info['normalizeField'] is not None:\r\n if media_info['normalizeField'] in field_mapping:\r\n media_info['normalizeField'] = field_mapping[media_info['normalizeField']]\r\n if 'fields' in media_info and media_info['fields'] is not None:\r\n for field in media_info['fields']:\r\n fields = []\r\n if field in field_mapping:\r\n fields.append(field_mapping[field])\r\n else:\r\n fields.append(field)\r\n media_info['fields'] = fields\r\n\r\n if 'definitionEditor' in layer and layer['definitionEditor'] is not None:\r\n if 'inputs' in layer['definitionEditor'] and layer['definitionEditor']['inputs'] is not None:\r\n for definition_input in layer['definitionEditor']['inputs']:\r\n if 'parameters' in definition_input and definition_input['parameters'] is not None:\r\n for param in definition_input['parameters']:\r\n if 'fieldName' in param and param['fieldName'] is not None:\r\n if param['fieldName'] in field_mapping:\r\n param['fieldName'] = field_mapping[param['fieldName']]\r\n if 'parameterizedExpression' in layer['definitionEditor'] and layer['definitionEditor']['parameterizedExpression'] is not None:\r\n layer['definitionEditor']['parameterizedExpression'] = _find_and_replace_fields(layer['definitionEditor']['parameterizedExpression'], field_mapping)",
"def LoadProps( self, props_dict ):\n for k in ( 'assemblyAddr', 'auxSubAddrs', 'mode', 'subAddr' ):\n if k in props_dict:\n setattr( self, k, props_dict[ k ] )\n\n super( SubPin2DView, self ).LoadProps( props_dict )",
"def set_attrs(self, **kwargs) -> None:\n self._obj.coords[GEO_MAP_COORD].attrs.update(**kwargs)",
"def SaveProps( self, props_dict, for_drag = False ):\n super( SubPin2DView, self ).SaveProps( props_dict, for_drag = for_drag )\n\n for k in ( 'assemblyAddr', 'auxSubAddrs', 'mode', 'subAddr' ):\n props_dict[ k ] = getattr( self, k )",
"def update_asop_dict(asop_dict,region,coords,color,all_settings):\n # Set unique color\n asop_dict['color'] = color\n\n # Apply any general user settings\n asop_dict['grid_desc'] = all_settings.get('grid','native')\n asop_dict['grid_type'] = all_settings.get('grid','native')\n asop_dict['region_name'] = region\n asop_dict['region_desc'] = region.replace('_',' ')\n asop_dict['region'] = coords\n\n # Edit dx for region\n mean_lat = np.mean(coords[0:2])\n asop_dict['dx'] = asop_dict['dx'] * np.cos(np.radians(mean_lat))\n all_settings.pop('infile','') # key not allowed\n for key in asop_dict:\n if key in all_settings:\n asop_dict[key] = all_settings[key]\n\n # Apply any specific file settings\n infile = os.path.basename(asop_dict['infile'])\n file_settings = settings.get(infile,{})\n file_settings.pop('infile','') # key not allowed\n file_settings.pop('region','')\n if file_settings:\n for key in file_settings:\n asop_dict[key] = file_settings[key]\n if 'legend_name' not in file_settings:\n asop_dict['legend_name'] = asop_dict['name'].replace('_',' ')\n\n print('---> Final data dictionary:')\n print(json.dumps(asop_dict, sort_keys=True, indent=2))\n\n return asop_dict",
"def __setAttributes(self):\n values = {\"f\":\"json\"}\n layerInfo = self._getEsriRESTJSON(self.url,values)\n #Geometry Type\n geometryType = getGeometryType(layerInfo['geometryType'])\n self.geometryType = geometryType\n #Name\n name=arcpy.ValidateTableName(layerInfo['name'])\n self.name=name\n #Spatial Reference - both the wkid and the arcpy SpatialReference object\n #in case it's in a wkt\n try:\n wkid = layerInfo['extent']['spatialReference']['wkid']\n except:\n wkid = 4326\n sr = arcpy.SpatialReference()\n sr.factoryCode = int(wkid)\n sr.create()\n self.sr = sr\n self.wkid = wkid\n #field used to update the feature class are a subset of all the fields in a feature class\n fields = layerInfo['fields']\n updateFields = []\n for field in fields:\n if (field['type'] in ['esriFieldTypeOID','esriFieldTypeGeometry','esriFieldTypeGUID'] or 'shape' in field['name'].lower() or field['name'] in self.userFields):\n pass\n else:\n updateFields.append(field)\n updateFields.insert(0, {\"name\":'Shape@', \"type\":\"esriFieldTypeGeometry\"})\n self.updateFields = updateFields\n #Max values\n if layerInfo.has_key('maxRecordCount'):\n self.maxRecordCount = int(layerInfo['maxRecordCount'])\n else:\n self.maxRecordCount = 1000",
"def overwrite_field(self,cells=None,edges=None,source='depth_max',target='depth_mean'):\n if cells is not None:\n self.cells[target][cells]=self.cells[source][cells]\n if edges is not None:\n self.edges[target][edges]=self.edges[source][edges]",
"def set_lookup_dict(self, add_info_dict):\n self.add_info_dict = add_info_dict",
"def __init__(self):\n\n for layer in self._layer_class_map:\n setattr(self, layer, self._layer_class_map[layer]())",
"def update_data( self, source ):\n assert isinstance( source, dict )\n for attr, value in source.items():\n assert hasattr( self, attr )\n setattr( self, attr, value )",
"def set_org_and_space_dicts(self, org_dict, space_dict):\n self._space = space_dict\n self._org = org_dict\n return self",
"def _update(self, kwargs):\n\n for attr in kwargs:\n self.ptr[attr] = kwargs[attr]",
"def __init__ (self, d):\n try:\n self.__dict__.update (d.__dict__)\n except:\n self.__dict__.update (d)",
"def generic_layer_dict_maker() -> dict:\n layer_dict = {\"core AND helix_start\": 'AFILVWYNQSTHP',\n \"core AND helix\": 'AFILVYNQHM',\n \"core AND loop\": 'AFGILPVWYDENQSTHM',\n \"core AND sheet\": 'FILVWYDENQSTH',\n \"boundary AND helix_start\": 'ADEHIKLNPQRSTV',\n \"boundary AND helix\": 'ADEHIKLNQRSTVM',\n \"boundary AND loop\": 'ADEFGHIKLNPQRSTVY',\n \"boundary AND sheet\": 'DEFHIKLNQRSTVY',\n \"surface AND helix_start\": 'DEHKPQR',\n \"surface AND helix\": 'EHKQR',\n \"surface AND loop\": 'DEGHKNPQRST',\n \"surface AND sheet\": 'EHKNQRST',\n \"helix_cap\": 'DNSTP'}\n return layer_dict",
"def _update_(self, srcdict):\n dict.update(self, srcdict)",
"def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True",
"def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)",
"def _apply_to_field(self, dictionary):\n setattr(self._modelInstance, self._datafield, dictionary)",
"def __extract_common_attrs(self, raw_data: Dict) -> None:\n for attr in self.COMMON_ATTRS:\n if attr not in self.ATTRS and attr in raw_data:\n setattr(self, attr, raw_data[attr])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
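set_address_values above mutates a module-level address_dict that the entry never shows. The usage sketch below assumes an ArcGIS/arcpy environment; the field names and the geodatabase path are hypothetical.

import arcpy

# Keys are the field names to harvest; each starts as None and is filled with
# the first value found in a layer row that carries that field.
address_dict = {
    "ADDR_NUM": None,
    "STREET_NAME": None,
    "CITY": None,
    "ZIP": None,
}

set_address_values(r"C:\data\addressing.gdb\address_points")
print(address_dict)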
Get AWS ECS task information. For the purpose of getting the EC2 instance id for a given AWS ECS task name, only the 'containerInstanceArn' is fetched from the AWS ECS task for now. | def get_tasks_information(
task: str,
list_tasks: str,
cluster=CLUSTER_NAME,
client=None,
region=REGION,
):
if not client:
session = boto3.session.Session()
client = session.client("ecs", region)
try:
# Get all tasks in the cluster.
cluster_tasks = client.list_tasks(cluster=cluster)["taskArns"]
logger.debug(f"[CLUSTERTASKS]: '{cluster_tasks}'.")
tasks = client.describe_tasks(cluster=cluster, tasks=cluster_tasks)[
"tasks"
]
logger.debug(f"[TASKS]: '{tasks}'.")
# Filter for given task name.
# Get instance id,
container_instances = []
task_name = ""
for task_ in tasks:
task_definition = task_.get("taskDefinitionArn", "")
if list_tasks:
container_instances.append(task_definition)
continue
container_instance_arn = task_.get("containerInstanceArn", None)
if container_instance_arn:
if not list_tasks:
if re.search(task, task_definition):
container_instances.append(container_instance_arn)
task_name = task_definition
break
else:
container_instances.append(container_instance_arn)
if list_tasks:
return "\n".join(container_instances)
instances = describe_instances_with_cluster(
container_instances=container_instances,
cluster=cluster,
client=client,
region=region,
)
if not instances:
return ""
logger.info(f"Instance '{instances[0]}' runs task '{task_name}'.")
return instances[0]
except (botocore.exceptions.ClientError) as e:
# TODO: Check right error code.
if e.response["Error"]["Code"] == "ClusterNotFoundException":
logger.error(f"Cluster '{cluster}' not found: {str(e)}.")
else:
logger.error(f"Error: {str(e)}")
sys.exit(1) | [
"def get(profile, cluster, tasks):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"tasks\"] = tasks\n return client.describe_tasks(**params)",
"def get_ec2_instances(self, task):\n # Assemble arguments to filter ec2 instances by task and branch tags.\n args = {}\n args['Filters'] = [{'Name': 'tag:meerkat:task', 'Values': [task]}]\n\n # Get instance data\n reservations = self.ec2_client.describe_instances(**args)['Reservations']\n\n # Structure the data and return.\n deploy_ids = []\n for res in reservations:\n for instance in res['Instances']:\n for tag in instance['Tags']:\n if tag['Key'] == 'opsworks:instance':\n deploy_ids.append(tag['Value'])\n return deploy_ids",
"def get_task(self):\n return self.task",
"def task_id(self):\n return self._task_id",
"def task_id(self):\n return self._task_id",
"def task_detail(self):\n return self._task_detail",
"def _look_for_task(self, instance_action_id, task_id):\n if isinstance(task_id, int):\n task_index = task_id\n else:\n if task_id.startswith(\"TASK-\"):\n task_id = task_id[5:]\n ins_action_id, _, task_index = task_id.rpartition(\".\")\n if ins_action_id:\n instance_action_id = ins_action_id\n\n tasks = self.db.get_rows(FROM=\"vim_wim_actions\", WHERE={\"instance_action_id\": instance_action_id,\n \"task_index\": task_index})\n if not tasks:\n return None\n task = tasks[0]\n task[\"params\"] = None\n task[\"depends\"] = {}\n if task[\"extra\"]:\n extra = yaml.load(task[\"extra\"], Loader=yaml.Loader)\n task[\"extra\"] = extra\n task[\"params\"] = extra.get(\"params\")\n else:\n task[\"extra\"] = {}\n return task",
"def task_get(client: Client, task_id: str) -> dict:\n response = requests.get(\n f'{client.base_url}/api/task_manager/v2/tasks/{task_id}',\n headers=client.auth_header,\n )\n handle_error_response(response)\n return response.json()",
"def gen_task_metadata(task, job_id):\n hostname = socket.gethostname()\n metadata = {'job_id': job_id, 'host': hostname}\n for hint in task.hints:\n if hint.class_ == \"DockerRequirement\" and \"dockerImageId\" in hint.params.keys():\n metadata['container_runtime'] = bc.get('task_manager', 'container_runtime')\n container_path = hint.params[\"dockerImageId\"]\n with open(container_path, 'rb') as container:\n c_hash = hashlib.md5()\n chunk = container.read(8192)\n while chunk:\n c_hash.update(chunk)\n chunk = container.read(8192)\n container_hash = c_hash.hexdigest()\n metadata['container_hash'] = container_hash\n return metadata",
"def task(self):\n intids = getUtility(IIntIds, name='intids')\n task = self.__parent__\n intid = intids.getId(task)\n return intid",
"def get_service_task_info(self, service):\n info = {'id': '', 'image': '', 'cmd': '', 'timestamp': '',\n 'message': 'task not available yet', 'status': 'notstarted',\n 'containerid': '', 'exitcode': '', 'pid': ''}\n\n task = self.get_service_task(service)\n if task:\n status = 'undefined'\n state = task['Status']['State']\n if state in ('new', 'pending', 'assigned', 'accepted', 'preparing',\n 'starting'):\n status = 'notstarted'\n elif state == 'running':\n status = 'started'\n elif state == 'failed':\n status = 'finishedWithError'\n elif state == 'complete':\n status = 'finishedSuccessfully'\n\n info['id'] = task['ID']\n info['image'] = task['Spec']['ContainerSpec']['Image']\n info['cmd'] = ' '.join(task['Spec']['ContainerSpec']['Command'])\n info['timestamp'] = task['Status']['Timestamp']\n info['message'] = task['Status']['Message']\n info['status'] = status\n\n if 'ContainerStatus' in task['Status']:\n info['containerid'] = task['Status']['ContainerStatus']['ContainerID']\n info['exitcode'] = task['Status']['ContainerStatus']['ExitCode']\n info['pid'] = task['Status']['ContainerStatus']['PID']\n return info",
"def _get_service_container_instances(self, cluster_name, service_arn, task_arn):\n if not task_arn:\n task_arn = self.get_task_arn(cluster_name, service_arn)\n\n tasks = self.ecs_client.describe_tasks(\n cluster=cluster_name, tasks=[task_arn])['tasks']\n containers = [x['containerInstanceArn'] for x in tasks]\n response = self.ecs_client.describe_container_instances(\n cluster=cluster_name,\n containerInstances=containers\n )\n return response['containerInstances']",
"def _get_taskinfo(task_name):\n t = tasks.ALL_TASKS.get(task_name, None)\n if t is None:\n # While task_name is probably already validated, it won't hurt to check again\n raise forms.ValidationError(\"Invalid task name: {0}\".format(task_name))\n return t",
"def _get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)",
"def task_name(self):\n return self._task_name",
"def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info",
"def get_task_id(self, task_type):\n\n for task_dict in self._configuration['tasks']:\n if task_dict['type'] == task_type and 'task_id' in task_dict:\n return task_dict['task_id']\n return None",
"def get_task_metadata(self, task):\n return self._gdb_interface.get_task_metadata(task)",
"def task_definition_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"task_definition_arn\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
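get_tasks_information above calls describe_instances_with_cluster and relies on module-level CLUSTER_NAME, REGION and logger values that are not part of the entry. The sketch below is an assumed implementation of that helper, using the real ECS DescribeContainerInstances API to map container-instance ARNs to EC2 instance ids.

def describe_instances_with_cluster(container_instances, cluster, client, region):
    # region is kept to mirror the call site above; the boto3 client already carries it.
    if not container_instances:
        return []
    response = client.describe_container_instances(
        cluster=cluster,
        containerInstances=container_instances,
    )
    return [ci["ec2InstanceId"] for ci in response["containerInstances"]]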
Return a message saying whether someone is tall enough for the attraction. | def lang_genoeg(lengte):
return | [
"def substituer(texte): # Donne une vague idée mais pas efficace, mal codé\r\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\r\n texte_lettre_only = []\r\n for car in texte:\r\n if car in alphabet:\r\n texte_lettre_only.append(car)\r\n nouveau_texte = list(texte)\r\n j = 0\r\n alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)\r\n alphabet_francais.sort(reverse=True)\r\n for lettre in texte_lettre_only:\r\n a = False\r\n i = 0\r\n if nouveau_texte[j] == \" \" or nouveau_texte[j] == \":\" or nouveau_texte[j] == \",\" or nouveau_texte[j] == \"?\" or nouveau_texte[j] == \".\" or nouveau_texte[j] == \"2\" or nouveau_texte[j] == \"6\":\r\n j += 1\r\n else:\r\n while a == False:\r\n if lettre == alphabet_francais_texte[i][1]:\r\n nouveau_texte[j] = alphabet_francais[i][1]\r\n a = True\r\n else:\r\n i += 1\r\n if i == 26:\r\n i = 0\r\n j += 1\r\n texte_str = str_convert(nouveau_texte)\r\n return texte_str",
"def cesar2(texte, decalage):",
"def test_greek_g_nasality_assimilation(self):\n condition = grc.Word(\"gignɔ́ːskɔː\", grc.GREEK[\"Attic\"][\"Probert\"])\n condition._refresh()\n condition._g_nasality_assimilation()\n output = \"\".join([p.ipa for p in condition.phones])\n target = unicodedata.normalize(\"NFC\", \"giŋnɔ́ːskɔː\")\n self.assertEqual(output, target)",
"def translate_leet(phrase):",
"def test_greek_print_ipa(self):\n w = grc.Word(\"élipe\", grc.GREEK[\"Attic\"][\"Probert\"])\n output = [w._print_ipa(True), w._print_ipa(False)]\n target = [\n unicodedata.normalize(\"NFC\", \"é.li.pe\"),\n unicodedata.normalize(\"NFC\", \"élipe\"),\n ]\n self.assertEqual(output, target)",
"def test_latin_intervocalic_j(self):\n condition = lat.Word(\"gaɫlɪ̣a\", lat.LATIN[\"Classical\"][\"Allen\"])\n condition._refresh()\n condition._intervocalic_j()\n outputs = \"\".join([p.ipa for p in condition.phones])\n target = unicodedata.normalize(\"NFC\", \"gaɫlɪ̣ja\")\n self.assertEqual(outputs, target)",
"def question_new_translate():",
"def cesar(texte, decalage):",
"def test_greek_s_voice_assimilation(self):\n condition = grc.Word(\"ẹːrgɑsménon\", grc.GREEK[\"Attic\"][\"Probert\"])\n condition._refresh()\n condition._s_voice_assimilation()\n output = \"\".join([p.ipa for p in condition.phones])\n target = unicodedata.normalize(\"NFC\", \"ẹːrgɑzménon\")\n self.assertEqual(output, target)",
"def rec_monomers_lig_HRG():\n Monomer('HRG', ['b', 'st'], {'st': ['M', 'E']}) # Heregulin ligand",
"def cryptage(lettre,cle_a,cle_b) :\r\n if (ord(lettre)>=97 and ord(lettre)<=122) : #codage des minuscules \r\n return(minuscules(lettre,cle_a,cle_b))\r\n elif(ord(lettre)>=65 and ord(lettre)<=90) : #codage des majuscules \r\n return(majuscules(lettre,cle_a,cle_b))\r\n else : #codage des caractères spéciaux (inchangés)\r\n return(lettre)",
"def ligada(self):\n return self.__ligada",
"def gerundify(verb):\n if verb.endswith(\"e\"):\n verb = verb[:-1]\n\n if random() < 0.4:\n if (\n not verb.startswith(\"a\")\n and not verb.startswith(\"e\")\n and not verb.startswith(\"i\")\n and not verb.startswith(\"o\")\n and not verb.startswith(\"u\")\n ):\n verb = \"a-\" + verb\n\n return verb + \"ing\"",
"def 取龜(我):\n return 我",
"def Basic_0037(Place):\n CountryCode, PlaceId = CountryCode_PlaceID(Place)\n return_emits = []\n try:\n BaseTextList = Place.findall(ns+\"BaseText\")\n for BaseText in BaseTextList:\n btext = BaseText.text\n btext_attrib = BaseText.attrib\n try: # Get the languageCode\n btext_lc = btext_attrib['languageCode']\n except:\n btext_lc = 'None'\n if btext_lc != \"ka\":\n for char in btext:\n if re.match(ur'[\\u10A0-\\u10FF]+',char):\n btext = btext.replace('|', '#') # remove pipe symbols so that they don't mess up the output\n btext = btext.encode('UTF-8') # convert the unicode to bytestrings for the return\n emit_string = 'Basic_0037|'+CountryCode+'|'+PlaceId+'|Invalid: Georgian characters found in BaseText|'+btext+'|languageCode=\"'+btext_lc+'\"'\n return_emits.append(emit_string)\n break\n except:\n pass\n return return_emits",
"def extract_cle_publc(self):",
"def motivation_letter(mistake_word, true_word):",
"def analyser_le_texte(texte, langue_id):\n\n lexique = lexique = win32com.client.Dispatch(\"SAPI.SpLexicon\")\n tuple = lexique.GetWords()\n\n les_mots = tuple[0] # la liste des mots (déjà en majuscules) du lexique\n tuple = None\n nb_mots = les_mots.Count\n if nb_mots == 0:\n return texte\n\n # créer la liste des mots qui correspondent à la langue\n list_mots = []\n for i in range(0, nb_mots):\n if les_mots.Item(i).LangId == langue_id:\n list_mots.append(les_mots.Item(i).Word)\n\n les_mots = None\n nb_mots = len(list_mots)\n if nb_mots == 0:\n return texte\n\n str = texte.upper() # cf remarque 2)\n str = str.lstrip(\" \")\n str = str.rstrip(\" \")\n\n for i in range(0, nb_mots):\n # mot = les_mots.Item(i).Word\n mot = list_mots[i]\n debut = 0\n separateur = \" \"\n lg_mot = len(mot)\n # pdb.set_trace()\n while bool(1) == True:\n position = str.find(mot, debut)\n if position != -1:\n str = str[0 : position] + separateur + mot + separateur + str[position + lg_mot : ]\n debut = position + lg_mot + 2 # +2 pour tenir compte des séparateurs\n else:\n break\n\n return str",
"def ala_lc(word):\n ala_lc_dict = {\n 'Ё' : 'Ë',\n 'А' : 'A',\n 'Б' : 'B',\n 'В' : 'V',\n 'Г' : 'G',\n 'Д' : 'D',\n 'Е' : 'E',\n 'Ж' : 'Zh',\n 'З' : 'Z',\n 'И' : 'I',\n 'Й' : 'Ĭ',\n 'К' : 'K',\n 'Л' : 'L',\n 'М' : 'M',\n 'Н' : 'N',\n 'О' : 'O',\n 'П' : 'P',\n 'Р' : 'R',\n 'С' : 'S',\n 'Т' : 'T',\n 'У' : 'U',\n 'Ф' : 'F',\n 'Х' : 'Kh',\n 'Ц' : 'T͡S',\n 'Ч' : 'Ch',\n 'Ш' : 'Sh',\n 'Щ' : 'Shch',\n 'Ъ' : 'ʺ',\n 'Ы' : 'Y',\n 'Ь' : 'ʹ',\n 'Э' : 'Ė',\n 'Ю' : 'I͡U',\n 'Я' : 'I͡A',\n 'а' : 'a',\n 'б' : 'b',\n 'в' : 'v',\n 'г' : 'g',\n 'д' : 'd',\n 'е' : 'e',\n 'ж' : 'zh',\n 'з' : 'z',\n 'и' : 'i',\n 'й' : 'ĭ',\n 'к' : 'k',\n 'л' : 'l',\n 'м' : 'm',\n 'н' : 'n',\n 'о' : 'o',\n 'п' : 'p',\n 'р' : 'r',\n 'с' : 's',\n 'т' : 't',\n 'у' : 'u',\n 'ф' : 'f',\n 'х' : 'kh',\n 'ц' : 't͡s',\n 'ч' : 'ch',\n 'ш' : 'sh',\n 'щ' : 'shch',\n 'ъ' : 'ʺ',\n 'ы' : 'y',\n 'ь' : 'ʹ',\n 'э' : 'ė',\n 'ю' : 'i͡u',\n 'я' : 'i͡a',\n 'ё' : 'ë',}\n return \"\".join(ala_lc_dict.get(c, c) for c in word)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a Pseudocode Operation to the currently active buffer. | def AddPseudoCode(self, pcode):
self.buffers[self.buffergrade].append(pcode) | [
"def add_code(self, code):\n self.code += code",
"def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1 + n2}')\n self._cursor += 4\n return",
"def add_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 + n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} + {n2} = {n1 + n2}')\n return",
"def add_opcodes(self, opcodes):\n self.opcodes.update(opcodes)",
"def generate_pseudo_instruction_code(label):\r\n s = []\r\n s.append('(' + label + ')')\r\n return s",
"def IncrementStatement(self) -> CodeStatement:",
"def _insert_op(self, op):\n if self._curblock is not None:\n self.emit(op)",
"def add(self, op):\n self.ops.append(op)\n op.func = self",
"def add_to_lexem(self, c):\n c = self.source[self.pos]\n self.lexemlst.append(c)\n self.pos += 1 # advanced to the next one\n if c == '\\n':\n self.lineno += 1 # line counter for error messages",
"def append(self,instr):\n self.instructions.append(instr)",
"def add_instruction(self, instruction):\n address = int(self.start_address, 0) + (self.num_of_instr * 4)\n self.instructions.append([phex(address, 10), instruction])\n self.num_of_instr = self.num_of_instr + 1",
"def pseudo(self, pseudo_enter):\n self.__pseudo = pseudo_enter",
"def start(self, pos = 0, lib_call = False) -> None:\n from utils.instructions import instructions\n self.scope_push()\n self.pos = pos\n while self.pos < len(self.code.instructions):\n self.pos += 1 + instructions[self.code.get_instruction(self.pos)].run(self, self.code, self.pos + 1)",
"def add_char(self, char):\n if self.pos >= self.line_length():\n self.buffer.append_char(char, self.line)\n else:\n self.buffer.insert_char(char, self.line, self.pos)\n \n self.pos += 1\n self.has_changes = True",
"def make_codes(self):\n\t\troot = heapq.heappop(self.heap)#obtenemos la raiz del arbol\n\t\tcurrent_code = \"\"\n\t\tself.make_codes_helper(root, current_code)",
"def add_state(self, state):\n\n if not getattr(self, \"buffer\"):\n self.reinit_buffer(state)\n\n self.buffer = self.buffer[1:]\n self.buffer.append(self.transform(state))",
"def add_op(self, expr):\n from cascada.bitvector import operation\n assert isinstance(expr, operation.Operation)\n assert not self.contain_op(expr)\n name = \"{}{}\".format(self.id_prefix, self.counter)\n self.counter += 1\n identifier = core.Variable(name, expr.width)\n self.table[identifier] = expr\n\n return identifier",
"def AddOperation(self, op):\n self._operations.append(op)",
"def asm(self, text):\n self.text.append(text)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Increment the BufferGrade and initialize a new empty buffer. | def IndentBuffer(self):
self.buffergrade += 1
self.buffers[self.buffergrade] = [] | [
"def set_garbage(self):\n self.grade = 0",
"def new_grade(self, value):\r\n self.logger.warn(\"Setting values on new_grade will NOT update the remote Canvas instance.\")\r\n self._new_grade = value",
"def update(self, grade):\n for i, component_grade in enumerate(grade._component_grades):\n if component_grade is not None:\n self._component_grades[i] = component_grade",
"def primeFastaBuffer(self, buffer):\n self.fastaBuffer = buffer",
"def grade(self, grade):\n\n self._grade = grade",
"def _make_buffer(self, *args):\n buffer = cl.Buffer(*args)\n self.total_buffer_size += buffer.get_info(cl.mem_info.SIZE)\n self.total_buffers += 1\n return buffer",
"def setGrade(self):\n if self.scores['sent_count'] != 0:\n self.us_grade = (1.043 * ((self.scores['polysyllword_count'] * (30 / self.scores['sent_count']))**0.5)) + 3.1291",
"def set_buffer(self, shader, offset):\n Buffer.bind_vbo(self.vertices)\n Buffer.bind_ebo(self.indexes)\n Buffer.get_attribute_location(shader, \"position\")\n Buffer.vertex_attribute(6, 0, 0)\n Buffer.get_attribute_location(shader, \"aNormal\")\n Buffer.vertex_attribute(6, offset, 1)",
"def _initialize_buffers(self) -> None:",
"def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))",
"def reset(self):\r\n self._buffer.fill(0)",
"def _refresh_buffers(self) -> None:",
"def add_grade(self, grade):\n if self._grades is None:\n self._grades = []\n self._grades.append(grade)",
"def current_grade(self, value):\r\n self.logger.warn(\"Setting values on current_grade will NOT update the remote Canvas instance.\")\r\n self._current_grade = value",
"def reset(self):\n self._buffer.fill(0)",
"def _push_buffer(self):\r\n \r\n self._push_writer()",
"def _buffer(self, enable=True):\n self._isBuffered = enable",
"def raise_grade(self):\n if self.grade < 5:\n self.grade += 1",
"def __init__(self, buffer_size):\n self.num_experiences = 0\n self.buffer = deque(maxlen=buffer_size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decrement the BufferGrade and pop out the previously active buffer. | def DeIndentBuffer(self):
if self.buffergrade == 0:
raise Exception("You can't deindent more.")
self.buffergrade -= 1
tmp = self.buffers[self.buffergrade + 1]
del self.buffers[self.buffergrade + 1]
return tmp | [
"def decrease_grade(self):\n if self.grade > 0:\n self.grade -= 1",
"def decrease(self):\n self.score -= self.score",
"def hit_decrement(self):\n self.hit -= 1",
"def decrement(self):\n self.count -= 1",
"def decrement(self):\n self.data[self.pointer] -= 1\n self.data[self.pointer] %= 256",
"def IndentBuffer(self):\n self.buffergrade += 1\n self.buffers[self.buffergrade] = []",
"def RemoveGrade(self, grade):\n if not self.__data['g'].HasKey(grade.ID):\n raise NonExistentItemIDError(\"Grade does not exist.\")\n self.__data['g'].RemoveItems([grade.ID])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()",
"def decSpan(self,b):\r\n self.b -= b;",
"def _pop_buffer(self):\r\n \r\n return self._buffer_stack.pop()",
"def test_decrease_buffer_under_filled(self):\n self._test_resize_buffer(15, 16, 8)",
"def decrement_score(self):\n if self.score > 0:\n self.score -= 1\n self.parent_post.decrement_total_score()",
"def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()",
"def decrement_max_gain(self):\r\n while self.max_gain > -self.pmax:\r\n self.max_gain -= 1\r\n if len(self[self.max_gain]) != 0:\r\n break",
"def decrement(self):\n self.value = self - 1",
"def dec_greediness(self):\n self._greediness -= 1",
"def decrement(self, x, y):\n self.field.add(x, y, -1)\n self.depth += 1",
"def dec(self, by=1):\n assert by > 0\n self.counter -= by\n if self.counter <= 0:\n # Don't leave self.counter < 0, that will screw things up in\n # future calls.\n self.counter = 0\n # Transitioning from nonzero to 0 means wait() need no longer wait.\n self.event.send()",
"def decrement(self):\n\t\tif self.value <= 0:\n\t\t\treturn\n\t\t\n\t\twith self._lock:\n\t\t\tself.value -= 1\n\t\treturn",
"def decrease_quantity(self):\n self._quantity_available -= 1\n return self._quantity_available"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a reference to the currently active buffer. | def RefBuffer(self):
return self.buffers[self.buffergrade] | [
"def buffer(self):\n return self.buffer_dict.get_active()",
"def get_buffer(self):\n return self.buffer",
"def current_buffer(self):\n return self.layout.current_buffer",
"def buffer_backend(cls, *args, **kwargs):\n return cls._buffer_context",
"def current_buffer_app(self):\n return self.session.current_buffer",
"def buffer_b(self) -> Optional[ShadertoyBuffer]:\n return self._buffer_b",
"def get_buffer(self) -> ctypes.Array:\n return self._buffer",
"def get_buffer(self):\n with self._buffer_lock:\n return deepcopy(self._buffer)",
"def getBuffer(self) -> bytes:\n return self._buffer",
"def bpointer(self):\r\n return self._bpointer",
"def ControlBufferBehaviour(self):\n return self._get_attribute('controlBufferBehaviour')",
"def reward_buffer(self):\n return self._reward_buffer",
"def buffer_c(self) -> Optional[ShadertoyBuffer]:\n return self._buffer_c",
"def get_frame_buffer(self):\n if self.lcd_enabled():\n return self.gb_screen\n else:\n return self.white_screen",
"def current_buffer(self, no_minibuffer=False):\n return \\\n self._mini_buffer \\\n if self.mini_buffer_state and not no_minibuffer else \\\n self.selected_window().buffer()",
"def get_buffername(self):\n return self.__buffername",
"def buffer(self):\n return self._session.request('window_get_buffer', self)",
"def get_buffer(*args):\n return _cerevoice_aud.get_buffer(*args)",
"def get_buffer(self):\n return self.latex_buffer"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Track a code indentation index for later use. | def TrackIfIndex(self, index):
self.indentindex.append(index) | [
"def increase_code_indent(self) -> None:\n self._parent_node.increase_code_indent()",
"def _increaseindentation(self):\n self._indentlist.append(self._curindent)\n if not self._equalsigns[-1]:\n self._curindent = self._curindent + self._indent",
"def addIndent(self):\r\n self._indents += 1",
"def getIndentationLevel(self, code_line):\n print(\"the code line : \", code_line)\n return len(code_line) - len(code_line.lstrip(\" \"))",
"def __doIndent(self):\n self.__incr( 'numBlocks' )\n self.blockDepth += 1\n self.metrics['blockDepth'] = self.blockDepth\n if self.metrics.get('maxBlockDepth',0) < self.blockDepth:\n self.metrics['maxBlockDepth'] = self.blockDepth\n return True",
"def addIndentationLevel(self, original_line, trace_call):\n # apply same level of indentation\n number_spaces = self.getIndentationLevel(original_line)\n print(\"step 3 spaces : \", number_spaces)\n \n # copy the original trace_call in the new_trace_call using\n # the correct number of spaces\n new_trace_call = []\n index_new_trace_call = 0\n for trace_line in trace_call:\n # calculate new size of the trace_line\n added_space_length = len(trace_line) + number_spaces\n # append spaces at the beginning of the line\n new_trace_call.append(trace_line.rjust(added_space_length)) \n index_new_trace_call = index_new_trace_call + 1\n return new_trace_call",
"def get_indent(self) -> int:\n return self._indent",
"def LineNumber(self) -> int:",
"def indent_level(self):\n return self._indent_level",
"def getLineNumber(self) -> int:\n ...",
"def tab(self):\n self.__num_tab_from_margin += 1\n self.__indent_str = self.__get_indent_string()",
"def indent(self, ws, doc, pos, indent_unit):\n context = CompletionContext(ws=ws, doc=doc, pos=pos)\n log.info(\"Line on cursor: %s\", context.line)\n indent_state = IndentState.detect(context.line, indent_unit)\n\n self._indent(context, indent_state)\n\n indentation = indent_state.indentation()\n insert_pos = pos\n return {\n \"indent\": indentation,\n \"textEdits\": [\n TextEdit(\n Range(start=insert_pos, end=insert_pos),\n new_text=f\"\\n{indentation}\",\n ).dump()\n ],\n }",
"def get_function_indent(line: str) -> int:\n first_function_entrance = line.index('def')\n indents = line[:first_function_entrance]\n indents_space_count = len(indents)\n return indents_space_count",
"def addIndents(self, prevLevel, nextLevel):\n for num in range(self.level - prevLevel):\n self.textLines[0] = u'<div>%s' % self.textLines[0]\n for num in range(self.level - nextLevel):\n self.textLines[-1] = u'%s</div>' % self.textLines[-1]\n return self.level",
"def is_indentation_sensitive(self) -> bool: #pylint: disable=no-self-use\n return False",
"def reindent_stats(tokens):\r\n find_stmt = 1 # next token begins a fresh stmt?\r\n level = 0 # current indent level\r\n stats = []\r\n\r\n for t in tokens:\r\n token_type = t[0]\r\n sline = t[2][0]\r\n line = t[4]\r\n\r\n if token_type == tokenize.NEWLINE:\r\n # A program statement, or ENDMARKER, will eventually follow,\r\n # after some (possibly empty) run of tokens of the form\r\n # (NL | COMMENT)* (INDENT | DEDENT+)?\r\n find_stmt = 1\r\n\r\n elif token_type == tokenize.INDENT:\r\n find_stmt = 1\r\n level += 1\r\n\r\n elif token_type == tokenize.DEDENT:\r\n find_stmt = 1\r\n level -= 1\r\n\r\n elif token_type == tokenize.COMMENT:\r\n if find_stmt:\r\n stats.append((sline, -1))\r\n # but we're still looking for a new stmt, so leave\r\n # find_stmt alone\r\n\r\n elif token_type == tokenize.NL:\r\n pass\r\n\r\n elif find_stmt:\r\n # This is the first \"real token\" following a NEWLINE, so it\r\n # must be the first token of the next program statement, or an\r\n # ENDMARKER.\r\n find_stmt = 0\r\n if line: # not endmarker\r\n stats.append((sline, level))\r\n\r\n return stats",
"def indentation(self, indent: str) -> None:\n self._indent = indent\n self._update()",
"def indent_level(self):\n return len(self._tagstack) - 1",
"def reset_line_numbers(self):\n # Because of code folding possibly advancing the numbering, have to\n # check the whole listing\n current_position = 1\n line_number = self.starting_line_number\n\n if len(self.lines) > 0 and isinstance(self.lines[0], FoldedCodeLine):\n line_number = self.starting_line_number + self.lines[0].size\n\n for line in self.lines:\n if not isinstance(line, FoldedCodeLine) and \\\n line.line_number != line_number:\n # changing the line number, trigger the hook\n line.line_number = line_number\n self.render_hook.line_changed(self, current_position, line)\n\n current_position += 1\n if isinstance(line, FoldedCodeLine):\n line_number += line.size\n else:\n line_number += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pop (get and remove) the last code indentation index tracked. | def PopIfIndex(self):
return self.indentindex.pop() | [
"def _decreaseindentation(self):\n self._curindent = self._indentlist.pop()",
"def pop(self, i=0):\n return self.frame.stack.pop(-1-i)",
"def pop_scope(self):\n top = self.scope_stack[-1]\n self.scope_stack = self.scope_stack[:-1]\n return top",
"def pop_current_line(self):\n self.current_line.pop()",
"def indentation_level(self):\n return self._indentation_levels[-1]",
"def pop_scope(self):\n self.scope_stack = self.scope_stack[:-1]",
"def undo_pop(self):\n # print(\"tokundo\", file=sys.stderr)\n self.pos -= 1",
"def unindent(self):\n self.__indent.pop()",
"def DeIndentBuffer(self):\n if self.buffergrade == 0:\n raise Exception(\"You can't deindent more.\")\n self.buffergrade -= 1\n tmp = self.buffers[self.buffergrade + 1]\n del self.buffers[self.buffergrade + 1]\n return tmp",
"def indentation(self):\n return self.__indent[-1]",
"def scope_pop(self) -> None:\n self.scope_stack.popleft()",
"def pop(self, idx):\n return self.stack.pop(idx)",
"def pop(self, line_number: int = -1) -> Statement:\n\n pass",
"def pop(self):\n return self.namespace_stack.pop()",
"def pop_last(self):\n self.pop_item(-1)",
"def remove_level(self):\n last_level = self.pyramid.pop()\n self.levels = len(self.pyramid)\n return last_level",
"def removeIndent(self):\r\n if self._indents <= 0:\r\n raise Exception(\"Indentation can not be decreased to negative values\")\r\n self._indents -= 1",
"def pop(self):\n value = self.stack[-1]\n del self.stack[-1]\n return value",
"def dedent(self):\n self.indentation = self.indentation_stack.pop()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialization of protected Operation Object attribute for subclasses. | def __init__(self):
self._OPERATION = None | [
"def __init__(self, **kwargs):\n super(StoredObject, self).__init__()",
"def __init__(self):\n Calculation.__init__(self)\n\n self._calculation_process_type = BasicCalculationProcess",
"def __init__(self, operations = []):\n self.operations = operations",
"def __init__(self):\n self.__dict__ = self.__state",
"def _init(self,name,type_) :\n # If the object was mapped to something from the dataset, put the right\n # variable behind it.\n if name in self.ws()._mappings:\n name = self.ws()._mappings[name]\n\n # Get the right object from our own cache, KeyError is raised correctly.\n if name in self.ws()._objects :\n x = self.ws()._objects[name]\n elif name in self.ws() :\n x = self._addObject( self.ws()[name] )\n else :\n raise KeyError, 'P2VV - ERROR: RooObject._init(): object not found in work space (%s)' % name\n\n if not x.InheritsFrom(type_) :\n raise KeyError('%s is %s, not %s' % (name, x.ClassName(), type_))\n self._var = x\n self.ws()._rooobjects[x.GetName()] = self",
"def __init__(self):\n self.init(**self.get_init_storage())",
"def __init_subclass__(cls) -> None:\n super().__init_subclass__()\n dataclass(cls)",
"def __init__(self):\n super().__init__(interface.RemoteControl, DEFAULT_PRIORITIES)",
"def __init__(self, **kwargs):\n\n super(RefactoringOperation, self).__init__(**kwargs)",
"def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n self.operation = eval(self._get_op_str())\n self.args = {'operation': self.operation.__name__, 'constargs': constargs, 'randomargs': randomargs}",
"def __init__(self, powerwall_data: PowerwallRuntimeData) -> None:\n super().__init__(powerwall_data)\n self._attr_unique_id = f\"{self.base_unique_id}_off_grid_operation\"",
"def _init_derived(self):\n DiffMapping._init_derived(self) # derived quantities of the mother class\n self._inverse = None",
"def __init__(self, **params):\n self.__object = object_param(**params)",
"def __init__(self, component, **kwargs):\n super(SingleOperation, self).__init__(**kwargs)\n self.component = component",
"def __init__(self):\n\t\tself._id = 0",
"def __init__(self):\n for name, value in self.get_class_attributes():\n setattr(self, name, deepcopy(value))",
"def __init__(self, base_attr, bitmask):\n\n self.base_attr = base_attr\n self.bitmask = bitmask",
"def __init__(self, op, op_param_list, op_reg_list):\n self. operation = {\n 'op': op,\n 'op_param_list': op_param_list,\n 'op_reg_list': op_reg_list\n }",
"def __init__(self):\n self.metadata_dict = {}\n self.set_nodata_val(None)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the Operation Object generated by the command. | def getOp(self):
return self._OPERATION | [
"def get_operation(self) -> Operation:\n\n if self.operation_type == 'model':\n operation = Model(operation_type=self.operation_name)\n elif self.operation_type == 'data_operation':\n operation = DataOperation(operation_type=self.operation_name)\n else:\n raise ValueError(f'Operation type {self.operation_type} is not supported')\n\n return operation",
"def get_operation(self):\n\n operation = OrderedDict(\n tags=self.parser.get_tags(),\n summary=self.parser.get_summary(),\n description=self.parser.get_description(),\n parameters=self.parameters,\n produces=None,\n consumes=None,\n responses=self.responses,\n security=self.security\n )\n\n for key, value in list(operation.items()):\n # Remove empty keys\n if not value:\n operation.pop(key)\n\n return operation",
"def get_operation_obect(self, method):\n pass",
"def get_operation(\n self,\n ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_operation\" not in self._stubs:\n self._stubs[\"get_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/GetOperation\",\n request_serializer=operations_pb2.GetOperationRequest.SerializeToString,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"get_operation\"]",
"def operation(self, name):\n\n try:\n return self.operations[name]\n except KeyError:\n return self.operation_not_found(name)",
"def GetCurrent(cls):\r\n current = OpContext.current()\r\n if current is not None and current.executing_op is not None:\r\n return current.executing_op\r\n return Operation()",
"def get_operation(cls, operation: BinaryOperation) -> Optional[BinaryOperation]:\n for id, op in vars(cls).items():\n if type(operation) == type(op):\n return op",
"def getOperation(self):\n return _libsbml.FluxBound_getOperation(self)",
"def operation_handle(self):\n return self.args[1]",
"def return_operation(self):\n return self._return_operation",
"def get_command(self):\n req_type = type(self.req)\n\n if req_type == ureq.CreateEntryRequest:\n return commands.CreateCommand(self.req.results)\n elif req_type == ureq.ReadEntryRequest:\n return commands.ReadCommand(self.req.results)\n elif req_type == ureq.UpdateEntryRequest:\n return commands.UpdateCommand(self.req.results)\n elif req_type == ureq.DeleteEntryRequest:\n return commands.DeleteCommand(self.req.results)",
"def deserialize(cls, payload):\n return operations_pb2.Operation.FromString(payload)",
"def operation_type(self):\n return self._operation_type",
"def operation_name(self):\n return self.args[0]",
"def to_command(self):\n return self.cmd",
"def GetOperation(name):\n client = GetClientInstance()\n messages = client.MESSAGES_MODULE\n request = messages.ApikeysOperationsGetRequest(name=name)\n try:\n return client.operations.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.OperationErrorException)",
"def get_command(self):\n return self.command",
"def current_operation(self):\n if self._supported_features & SUPPORT_OPERATION_MODE:\n try:\n operation = OperationMode(self.ampio.get_item_state(*self.config[CONF_OPERATION_MODE_ITEM])).name\n if self.is_heating:\n operation = \"{}(Heating)\".format(operation)\n return operation\n except (TypeError, KeyError, ValueError):\n return None\n else:\n return None",
"def get_operations(self):\n raise NotImplementedError(\n 'operation get_operations(...) not yet implemented')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a temporary image for manipulation, and handles optional RGB conversion. | def _create_tmp_image(self, content):
content.seek(0)
image = Image.open(content)
if self.force_rgb and image.mode not in ('L', 'RGB', 'RGBA'):
image = image.convert('RGB')
return image | [
"def temporary_image(self):\n\n image = Image.new('RGB', (1, 1))\n tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')\n image.save(tmp_file, 'jpeg')\n # important because after save(),\n # the fp is already at the end of the file\n tmp_file.seek(0) # retrieves the created temp file\n return tmp_file",
"def _create_img() -> ImageObject:\n return ImageObject(IMG_DEFAULT_SRC)",
"def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)",
"def prepare_image(im_non_numpy, target=None):\n # if the image mode is not RGB, convert it\n if im_non_numpy.mode != \"RGB\":\n im_non_numpy = im_non_numpy.convert(\"RGB\")\n\n if target:\n # resize the input image and preprocess it\n im_non_numpy = im_non_numpy.resize(target)\n\n im = np.array(im_non_numpy)\n return im",
"def new_test_image():\n warnings.warn(DeprecationWarning(\n \"new_test_image() is deprecated in favour of the get_sample_image() \"\n \"context manager.\"), stacklevel=2)\n image_name = 'test-{}.png'.format(uuid.uuid4())\n image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))\n ImageDraw.Draw(image)\n byte_io = BytesIO()\n image.save(byte_io, 'png')\n byte_io.seek(0)\n return image_name, ContentFile(byte_io.read(), image_name)",
"def create_temporary_image(image):\n\n temp = tempfile.NamedTemporaryFile()\n temp.write(image)\n temp.seek(0)\n\n return temp",
"def asRGB(self):\r\n\r\n width, height, pixels, meta = self.asDirect()\r\n if meta['alpha']:\r\n raise Error(\"will not convert image with alpha channel to RGB\")\r\n if not meta['greyscale']:\r\n return width, height, pixels, meta\r\n meta['greyscale'] = False\r\n typecode = 'BH'[meta['bitdepth'] > 8]\r\n\r\n def iterrgb():\r\n for row in pixels:\r\n a = array(typecode, [0]) * 3 * width\r\n for i in range(3):\r\n a[i::3] = row\r\n yield a\r\n return width, height, iterrgb(), meta",
"def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()",
"def convert_image_to_rgb(self):\n self.image = self.image.convert('RGB')",
"def recreate_image(im_as_var):\n recreated_im = im_as_var.data.numpy()[0]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n # recreated_im = np.round(recreated_im * 255)\n return recreated_im",
"def test_write_lossless_rgb(tmp_path):\n\n temp_file = str(tmp_path / \"temp.webp\")\n # temp_file = \"temp.webp\"\n\n pil_image = hopper(\"RGBA\")\n\n mask = Image.new(\"RGBA\", (64, 64), (128, 128, 128, 128))\n # Add some partially transparent bits:\n pil_image.paste(mask, (0, 0), mask)\n\n pil_image.save(temp_file, lossless=True)\n\n with Image.open(temp_file) as image:\n image.load()\n\n assert image.mode == \"RGBA\"\n assert image.size == pil_image.size\n assert image.format == \"WEBP\"\n image.load()\n image.getdata()\n\n assert_image_equal(image, pil_image)",
"def convert_to_rgb(image):\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n return image",
"def test_no_rgb_colorspace(self):\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"broken_colorspace.gif\")\n self._upload_photo(user, file_path)",
"def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg",
"def generate_dest_image(self):\n self.dest_image = np.zeros((self.dest_height, self.dest_width, 3))\n for h_index in xrange(self.dest_height):\n for w_index in xrange(self.dest_width):\n b, g, r, c = self.current_total_rgb_counter[h_index, w_index]\n if c != 0:\n b, g, r = map(\n lambda x: int(x/c),\n [b, g, r]\n )\n self.dest_image[h_index, w_index] = (b, g, r)",
"def _tiny_png(outfile,width=4,height=4,\n bg_color=RGB_COLORS['white'],\n fg_color=RGB_COLORS['blue']):\n img = Image.new('RGB',(width,height),bg_color)\n pixels = img.load()\n for i in range(int(width/2)):\n for j in range(int(height/2)):\n pixels[i,j] = fg_color\n for i in range(int(width/2),width):\n for j in range(int(height/2),height):\n pixels[i,j] = fg_color\n fp,tmp_plot = tempfile.mkstemp(\".tiny.png\")\n img.save(tmp_plot)\n os.fdopen(fp).close()\n shutil.move(tmp_plot,outfile)\n return outfile",
"def makecolorimage(self):\n if (self.stampsize == self.samplesize == 0) and self.testfirst:\n # Already did the full image!\n print('Full size image already made.')\n imfile = self.testimages[-1]\n outfile = join(self.outdir, self.outfile)\n if self.deletetests:\n print('Renaming to', outfile)\n os.rename(imfile, outfile)\n else:\n print('Copying to', outfile)\n os.copy(imfile, outfile)\n imfull = Image.open(outfile)\n return imfull\n \n # Clean up: Delete test images\n if self.deletetests:\n for testimage in self.testimages:\n if exists(testimage):\n os.remove(testimage)\n\n dx = dy = self.stampsize\n if dx * dy == 0:\n dx = dy = self.maxstampsize\n \n imfull = Image.new(self.mode, (self.nx, self.ny))\n\n print\n if self.mode == 'RGB':\n print('Making full color image, one stamp (section) at a time...')\n elif self.mode == 'L':\n print('Making full grayscale image, one stamp (section) at a time...')\n #for yo in range(0,self.ny,dy):\n #dy1 = min([dy, self.ny-yo])\n #for xo in range(0,self.nx,dx):\n #dx1 = min([dx, self.nx-xo])\n for yo in range(self.ylo,self.yhi,dy):\n dy1 = min([dy, self.yhi-yo])\n for xo in range(self.xlo,self.xhi,dx):\n dx1 = min([dx, self.xhi-xo])\n print('%5d, %5d / (%d x %d)' % (xo, yo, self.nx, self.ny))\n #print dx, dy, self.nx, self.ny, dx1, dy1\n #stamps = self.dataRGB[:,yo:yo+dy,xo:xo+dx]\n limits = yo, yo+dy, xo, xo+dx\n stamps = self.loadstamps(limits)\n im = RGBscale2im(stamps, self.levdict, self.noiselums, self.colorsatfac, self.mode, self.invert)\n if self.show and self.showstamps:\n im.show()\n\n #print array(stamps).shape, im.size, xo,self.ny-yo-dy1,xo+dx1,self.ny-yo\n imfull.paste(im, (xo,self.ny-yo-dy1,xo+dx1,self.ny-yo))\n\n #outfile = outname+'.png'\n outfile = join(self.outdir, self.outfile)\n if self.legend:\n self.addlegend(im=imfull)\n else:\n print('Saving', outfile, '...')\n imfull.save(outfile)\n \n #imfull.save(root+'.jpg')\n\n if self.show:\n self.showimage(outfile, Image)\n \n return imfull",
"def __make_png(self, abspath_img_rgb):\n if not os.path.exists(DIR_PNG):\n os.makedirs(DIR_PNG)\n\n outsize = '{}%'.format(OUTSIZE_RGB)\n img_name_rgb = os.path.basename(abspath_img_rgb)\n suffix_extension_tif = Utils.get_suffix_tif(img_name_rgb)\n img_png = img_name_rgb.replace(suffix_extension_tif, '.png')\n path_img_png = os.path.join(DIR_PNG, img_png)\n\n command = \"gdal_translate -ot byte -of PNG -outsize {} {} \" \\\n \"-a_nodata 0 -q {} {}\".format(\n outsize, outsize, abspath_img_rgb, path_img_png\n )\n os.system(command)\n return os.path.join(DIR_PNG_TO_DB, img_png)",
"def test_fromarray_rgb_fail():\n arr = numpy.zeros((20, 10, 3), dtype='float')\n\n parameters = {'data': [arr]}\n\n images.fromarray(parameters).convert('RGB')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renders the image. Override this method when creating a custom renderer. | def _render(self, image):
raise NotImplementedError('Override this method to render images!') | [
"def render_canvas(self):\r\n self._display.image(self._image)\r\n self._display.display()",
"def render(self):\n self.dirty = True\n self.image.fill(self.fill_color)\n if len(self.text):\n while self.font.size(self.text)[0]> self.pixel_width:\n self.backspace_char()\n self.text_img = self.font.render(self.text, 1, self.text_color, \\\n self.fill_color)\n self.image.blit(self.text_img, (2,2))\n xpos = self.font.size(self.text[:self.caretpos])[0]+2\n pygame.draw.line(self.image, (255, 255, 255, 255), (xpos, 2), \\\n (xpos, self.image.get_height()-2), 1)\n else:\n pygame.draw.line(self.image, (255, 255, 255), (3, 2), \\\n (3, self.image.get_height()-2), 1)\n pygame.draw.rect(self.image, (100, 100, 100), self.text_field_rect , 2)",
"def render(self, surface):\r\n surface.blit(self._image, self._rect)",
"def draw_image(self, ax, image):\r\n self.renderer.draw_image(imdata=utils.image_to_base64(image),\r\n extent=image.get_extent(),\r\n coordinates=\"data\",\r\n style={\"alpha\": image.get_alpha(),\r\n \"zorder\": image.get_zorder()},\r\n mplobj=image)",
"def RenderImage(self):\n if self.node != None:\n eval_info = EvalInfo(self.node)\n image = eval_info.node.EvaluateNode(eval_info)\n return image",
"def render(self, *args, **kwargs):\n self._renderer.render(*args, **kwargs)",
"def draw(self):\n self.write_image()\n self.update()",
"def prep_image(self):\n self.image = self.font.render(self.text, True, self.text_color, self.bg_color)\n self.image_rect = self.image.get_rect()",
"def draw(self, surface):\r\n surface.blit(self.image, self.rect)",
"def render_emotion(self):\n\n check_rendering_requirements()\n\n import matplotlib.pyplot as plt\n import numpy as np\n\n arr = np.asarray(self.content, dtype=np.uint8)\n img = self._renderResultOnImage(self.raw_result, arr)\n ig, ax = plt.subplots(figsize=(15, 20))\n ax.imshow(img)\n plt.show()",
"def render(self, mode='human'):\n self.rendering_mode = mode\n\n if self.viewer is None:\n self.viewer = EnvViewer(self, offscreen=self.offscreen)\n\n self.enable_auto_render = not self.offscreen\n\n # If the frame has already been rendered, do nothing\n if self.should_update_rendering:\n self.viewer.display()\n\n if mode == 'rgb_array':\n image = self.viewer.get_image()\n if not self.viewer.offscreen:\n self.viewer.handle_events()\n self.viewer.handle_events()\n return image\n elif mode == 'human':\n if not self.viewer.offscreen:\n self.viewer.handle_events()\n self.should_update_rendering = False",
"def render(self):\n dirty_rects = self.all_sprites.draw(self.screen)\n pg.display.update(dirty_rects)",
"def render_image(self, rgbobj, dst_x, dst_y):\n self.logger.debug(\"redraw pixmap=%s\" % (self.pixmap))\n if self.pixmap is None:\n return\n self.logger.debug(\"drawing to pixmap\")\n\n # Prepare array for rendering\n arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8)\n (height, width) = arr.shape[:2]\n\n return self._render_offscreen(self.pixmap, arr, dst_x, dst_y,\n width, height)",
"def display(self, canvas, x, y, width, height):\n canvas.create_image(x, y, image=img_agent, anchor=NW)",
"def _render_callback(self):\n pass",
"def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()",
"def draw_image_processing(self, screen):\n screen.blit(self.get_image_processing(), self.get_image_processing_rect())",
"def showRGBImage( self ):\n logger.debug( f\"showRGBImage()\" )\n\n self._vtkArr = numpy_to_vtk( np.flip( self._rgbImage.swapaxes( 0, 1 ), axis = 1 ).reshape( (-1, 3), order = \"F\" ) )\n\n self._image.SetDimensions( self._yLen, self._xLen, 1 )\n self._image.GetPointData().SetScalars( self._vtkArr )\n\n self._scaledImage.SetInputData( self._image )\n self._scaledImage.SetAxisMagnificationFactor( 0, 0.5 )\n self._scaledImage.SetAxisMagnificationFactor( 1, 0.5 )\n self._scaledImage.Update()\n\n self._imageMapper.SetInputData( self._scaledImage.GetOutput() )\n\n self._renderWindow.Render()",
"def display(self):\n image_qt = ImageQt.ImageQt(self.view_state.get_image())\n self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image_qt))\n self.imageLabel.adjustSize()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalize, pad and batch the input images. | def preprocess_image(self, batched_inputs):
images = [x.to(self.device) for x in batched_inputs]
norms = [self.normalizer(x) for x in images]
size = (norms[0].shape[1],norms[0].shape[2])
images = ImageList.from_tensors(norms, self.backbone.size_divisibility)
return images, size | [
"def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data",
"def preprocess_image(self, batched_inputs, opt = ''):\n images = batched_inputs[\"images\"].to(self.device)\n # images = batched_inputs\n images.sub_(self.pixel_mean).div_(self.pixel_std)\n return images",
"def apply_preprocess(self, img, batch_size=10):\n self._setup()\n\n # Loop to save RAM\n index = np.arange(img.shape[0])\n new_img = []\n for index in range(0, img.shape[0], batch_size):\n batch = img[index:min(index + batch_size, img.shape[0]), ::]\n processed = self._pre_process.augment_images(batch)\n new_img.append(processed)\n\n return np.concatenate(new_img, axis=0)",
"def dimension_preprocess(self, img, padding=True):\n \n # Assert single image input\n assert len(img.shape) == 3, \"Image dimension expected to be (H, W, C)\"\n \n # Check if we are adding padding for too small images\n if padding:\n \n # Check if height is too small\n if img.shape[0] < self.rows:\n padding = np.ones((self.rows - img.shape[0], img.shape[1], img.shape[2]))\n img = np.concatenate((img, padding), axis=0)\n \n # Check if width is too small\n if img.shape[1] < self.cols:\n padding = np.ones((img.shape[0], self.cols - img.shape[1], img.shape[2]))\n img = np.concatenate((img, padding), axis=1)\n \n # Get chunking of the image\n x_chunks, y_chunks = self.get_chunks(img)\n \n # Chunk up the image\n images = []\n for x in x_chunks:\n for y in y_chunks:\n images.append(\n img[x[0]:x[1], y[0]:y[1], :]\n )\n images = np.array(images) \n return images",
"def process_batch(self, inputs):\n\n for key, ipt in inputs.items():\n if key != 'target_folder':\n \n inputs[key] = ipt.to(self.device)\n\n if self.opt.pose_model_type == \"shared\":\n # If we are using a shared encoder for both depth and pose (as advocated\n # in monodepthv1), then all images are fed separately through the depth encoder.\n all_color_aug = torch.cat([inputs[(\"color_aug\", i, 0)] for i in self.opt.frame_ids])\n all_features = self.models[\"encoder\"](all_color_aug)\n all_features = [torch.split(f, self.opt.batch_size) for f in all_features]\n\n features = {}\n for i, k in enumerate(self.opt.frame_ids):\n features[k] = [f[i] for f in all_features]\n\n outputs = self.models[\"depth\"](features[0])\n else:\n # Otherwise, we only feed the image with frame_id 0 through the depth encoder\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0])\n outputs = self.models[\"depth\"](features)\n # print('primary output',outputs)\n\n if self.opt.predictive_mask:\n outputs[\"predictive_mask\"] = self.models[\"predictive_mask\"](features)\n\n if self.use_pose_net:\n outputs.update(self.predict_poses(inputs, features))\n\n self.generate_images_pred(inputs, outputs)\n losses = self.compute_losses(inputs, outputs)\n\n #also here are not backpropagated\n # experiment.log_metric('loss per batch', losses[\"loss\"].cpu().detach().numpy())\n #print(outputs)\n #print('begin////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////')\n # print(\"this is the output\", outputs)\n #for key, value in outputs.items():\n # print('key',key, \"value size\",value.size())\n # if key == \"('sample', -1, 0)\":\n # print(\"dict key\",value.size())\n #print('end/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////')\n return outputs, losses",
"def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs",
"def update_image_sizes(self, data):\n \n # Iterate back over the images in the training dataset and clip them if needed\n for inp, out in data:\n\n # Grab the municipality ID from the image name and load it\n muni_id = inp[0].split(\"/\")[3]\n \n # This will do the iterative clipping to the correct size\n cur_image = self.prep_input(inp)\n most_recent_im_size = (cur_image.shape[2], cur_image.shape[3])\n \n # Here, check to see if the image size is already less than or equal to the convergence dimensions,\n # If it is, skip the clipping and move on to the next input\n if (most_recent_im_size[0] <= self.convergence_dims[0]) or (most_recent_im_size[1] <= self.convergence_dims[1]):\n self.go_dict[muni_id] = 1\n if self.v:\n print(\"Image is already at scale, skipping clip.\")\n continue \n \n # If it's not the first epoch...\n if self.epoch > 0:\n \n self.model.eval()\n \n # Get the size of the image\n image_size = (cur_image.shape[2], cur_image.shape[3])\n \n # Get the gradcam and the attention heatmap for the current image\n gradcam, attn_heatmap = get_gradcam(self.model, image_size, cur_image.cuda(), target_layer = self.model.sa) \n \n # Then clip the input to the attention-based area\n cur_image, new_dims = self.clip_input(cur_image, attn_heatmap)\n \n if self.v:\n print(\"\\n\")\n print(muni_id)\n print(\"old image size: \", image_size)\n print(\"new image size: \", cur_image.shape[2], cur_image.shape[3])\n \n cur_image.cpu()\n \n # Update the image sizes in the dictionary\n self.image_sizes[muni_id].append(new_dims)\n \n if self.plot:\n plot_gradcam(gradcam)\n plt.savefig(f\"epoch{self.epoch}_muni{muni_id}_gradcam.png\")",
"def preprocess_images(input_dir, output_dir, suffix='full'):\n if not os.path.isdir(input_dir):\n raise FileNotFoundError(f'Directory {input_dir} does not exist')\n\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n trans = get_by_suffix(suffix)\n\n for item in tqdm(os.listdir(input_dir)):\n hr = cv2.imread(os.path.join(input_dir, item))\n\n hr = ia.Resize({\"height\": 512, \"width\": 512}).augment_image(hr)\n lr = trans.augment_image(hr)\n img = np.concatenate((lr, hr), axis=0)\n cv2.imwrite(os.path.join(output_dir, item), img)",
"def batch_normalization(inputs, training=False, **kwargs):\n bn_layer = batch_norm_class(training)(**kwargs)\n return bn_layer(inputs, training=training)",
"def space_to_batch(images, labels, tiles, n_tiles, paddings_image, paddings_tiles, shape_padded_image, shape_padded_label, shape_input, shape_output, b_with_labels=False, b_verbose=False):\n\n # map parse function to each zipped element\n print(paddings_tiles, shape_padded_label, shape_output)\n assert any([a % b <= 0 for a, b in zip(shape_padded_label, shape_output)])\n\n paddings_both = [a + b for a, b in zip(paddings_image, paddings_tiles)]\n shape_padded_both = [a + 2 * b for a, b in zip(shape_padded_image, paddings_tiles)]\n scale_factor = [float(a/b) for a, b in zip(shape_padded_both, shape_padded_image)]\n\n paddings_labels = [(x, x) for x in paddings_tiles] + [(0, 0)]\n paddings_both = [(x, x) for x in paddings_both] + [(0, 0)]\n\n if b_verbose:\n print('Padding/ padding_img: ', paddings_labels, paddings_both, scale_factor)\n logging.info('Using %d patches to predict a whole image', n_tiles)\n\n # process labels into patches\n if b_with_labels:\n # print('labels prior: ', labels)\n labels = tf.pad(labels, paddings_labels)\n labels = tf.expand_dims(labels, axis=0)\n batch_shape = tf.stack([n_tiles, *shape_output, tf.shape(labels)[-1]])\n labels = tf.reshape(labels, batch_shape)\n # print('labels post: ', labels)\n\n # process images into patches\n # Note: a simple reshape is not possible due to the overlapping of inputs\n # map_fn or tf while_loops or sth similar might help\n images = tf.pad(images, paddings_both)\n if b_verbose:\n images = tf.Print(images, [tf.shape(images), tiles], 'Temporary patch shape - before: ', summarize=5)\n\n patches = [None for _ in range(n_tiles)]\n # patch_indices = list(range(n_tiles))\n positions = [None for _ in range(n_tiles)]\n offset_image = [int(x / 2) for x in shape_input]\n idx_tile = 0\n for idx_0 in range(tiles[0]):\n for idx_1 in range(tiles[1]):\n for idx_2 in range(tiles[2]):\n start_pos = [shape_output[0] * idx_0, shape_output[1] * idx_1, shape_output[2] * idx_2, 0]\n positions[idx_tile] = [float(a + b) for a, b in zip(start_pos[0:3], offset_image)]\n patches[idx_tile] = tf.slice(images, start_pos, shape_input + [tf.shape(images)[-1]])\n idx_tile += 1\n # images = tf.Print(images, [tf.shape(images), idx_0, idx_1, idx_2, start_pos], 'performed crop at: ')\n\n if b_verbose:\n patches[0] = tf.Print(patches[0], [tf.shape(patches[0])], 'Temporary patch shape - within: ', summarize=5)\n images = tf.stack(patches, axis=0)\n\n positions_t = tf.stack(positions, axis=0)\n positions_t = tf.cast(tf.multiply((tf.divide(positions_t, shape_padded_both) - 0.5) * 2, scale_factor), dtype=tf.float32) # rescale it | account for larger padded size\n if b_verbose:\n images = tf.Print(images, [tf.shape(images)], 'Temporary patch shape - after: ', summarize=5)\n\n return images, labels, positions_t",
"def _pad_img(self, results):\n pad_val = self.pad_val.get('img', 0)\n for key in results.get('img_fields', ['img']):\n if self.pad_to_square:\n max_size = max(results[key].shape[:2])\n self.size = (max_size, max_size)\n if self.size is not None:\n padded_img = general_ocr.impad(\n results[key], shape=self.size, pad_val=pad_val)\n elif self.size_divisor is not None:\n padded_img = general_ocr.impad_to_multiple(\n results[key], self.size_divisor, pad_val=pad_val)\n results[key] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor",
"def _pad_img(self, results):\n if self.size is not None:\n padded_img = self.impad(\n results['img'], shape=self.size, pad_val=self.pad_val)\n elif self.size_divisor is not None:\n pad_h = int(np.ceil(results['img'].shape[0] / self.size_divisor)) * self.size_divisor\n pad_w = int(np.ceil(results['img'].shape[1] / self.size_divisor)) * self.size_divisor\n pad_d = int(np.ceil(results['img'].shape[2] / self.size_divisor)) * self.size_divisor\n padded_img = self.impad(\n results['img'], shape=(pad_h, pad_w, pad_d), pad_val=self.pad_val)\n results['img'] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor",
"def dimension_postprocess(self, chunked_data, original_data, scale=1, padding=True):\r\n\r\n assert len(original_data.shape) == 2, \"data dimension expected to be (xline ,samp_point)\"\r\n assert len(chunked_data.shape) == 3, \"Chunked data dimension expected to be (batch_size, xline, samp_point)\"\r\n\r\n if padding:\r\n if original_data.shape[0] < self.rows:\r\n new_images = []\r\n for data in chunked_data:\r\n new_images.append(data[0:scale * original_data.shape[0], :])\r\n chunked_data = np.array(new_images)\r\n\r\n if original_data.shape[1] < self.cols:\r\n new_images = []\r\n for data in chunked_data:\r\n new_images.append(data[:, 0:scale * original_data.shape[1]])\r\n chunked_data = np.array(new_images)\r\n\r\n new_shape = (\r\n original_data.shape[0] * scale,\r\n original_data.shape[1] * scale\r\n )\r\n reconstruction = np.zeros(new_shape)\r\n x_chunks, y_chunks = self.get_chunks(original_data)\r\n\r\n i = 0\r\n s = scale\r\n for x in x_chunks:\r\n for y in y_chunks:\r\n prior_fill = reconstruction != 0\r\n chunk = np.zeros(new_shape)\r\n chunk[x[0] * s:x[1] * s, y[0] * s:y[1] * s] += chunked_data[i]\r\n chunk_fill = chunk != 0\r\n reconstruction += chunk\r\n reconstruction[prior_fill & chunk_fill] = reconstruction[prior_fill & chunk_fill] / 2\r\n i += 1\r\n return reconstruction",
"def _pre_process_images(images, details):\n # If the images are gray-scale, the number of channels (1) must be \"added\" to the size of the samples.\n if details['channels'] == 1:\n img_rows, img_cols = details['sample size']\n\n # The place of the dimension with 1 depends on the backend used by Keras.\n if K.image_data_format() == 'channels_first':\n images = images.reshape(images.shape[0], 1, img_rows, img_cols)\n else:\n images = images.reshape(images.shape[0], img_rows, img_cols, 1)\n\n # Normalize pixel values to be in the interval [0, 1]\n images = images.astype('float32')\n max_bit_value = 2 ** details['bits per sample'] - 1\n images /= max_bit_value\n return images",
"def preprocess_images():\n whale_list = handle_input()\n # Remove augment folder if it exists\n if os.path.exists(\"augment\"):\n shutil.rmtree(\"augment\")\n os.makedirs(\"augment\") # Create new augmentation folder\n # For all whales check if there is enough images, if not create extra\n for w in whale_list[:40]: # TODO: remove 10\n tmp = []\n n = 0\n # If there is not enough images\n while w.n + len(tmp) < WHALE_NUM:\n img = pil_image.open(random.choice(w.flist)) # Open a random existing image\n img = aug.random_augment(img) # Randomly augment this image\n im_name = os.path.join(\"augment\", f\"{w.id}_{n}.jpg\") # Create new filename\n aug.save_image(img, im_name) # Save the image\n tmp.append(im_name) # Append file name\n n += 1\n # Add new images to WhaleData object\n for n in tmp:\n w.add_image(n)\n # Write existing + augmented images per whale to file\n with open(\"train_augment.csv\", \"w\") as f:\n for w in whale_list[:40]:\n f.write(w.output_line() + \"\\n\")",
"def preprocess_images(self, images):\n raise NotImplementedError",
"def main():\n\n # Just grab all files - we'll use try/except to filter\n images = glob.glob(os.path.join(args.input_dir, '*.*'))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for img_file in images:\n print(img_file)\n try:\n np_img = plt.imread(img_file)\n print(np_img.shape)\n img_name = img_file.split(os.sep)[-1]\n new_img_file = os.path.join(args.output_dir, img_name)\n pad_image(np_img, new_img_file)\n except Exception as e:\n print('Warning: {}. Skpping file.'.format(e))\n continue",
"def preprocess(self, inputs, color_aug):\n for k in list(inputs):\n if 'color' in k:\n n, im, i = k\n for i in range(self.num_scales):\n inputs[(n, im, i)] = self.resize[i](inputs[(n, im, i - 1)])\n elif 'instance_mask_x' in k or 'instance_mask_y' in k:\n n, im, i = k\n inputs[(n, im, 0)] = self.resize_mask(inputs[(n, im, -1)])\n\n for k in list(inputs):\n f = inputs[k]\n if \"color\" in k:\n n, im, i = k\n inputs[(n, im, i)] = self.to_tensor(f)\n inputs[(n + \"_aug\", im, i)] = self.to_tensor(color_aug(f))\n if 'instance_mask_x' in k or 'instance_mask_y' in k:\n n, im, i = k\n inputs[(n, im, i)] = self.to_tensor(f)",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that apigateway v1 and apigateway v2 actions are both present in the apigateway namespace | def test_services_with_multiple_pages_apigateway(self):
# API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html
self.assertTrue("apigateway:AddCertificateToDomain" in self.all_actions)
self.assertTrue("apigateway:RemoveCertificateFromDomain" in self.all_actions)
self.assertTrue("apigateway:SetWebACL" in self.all_actions)
# API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html
# API Gateway V2 doesn't have any unique actions, but it does have some unique resource types. Let's make sure those resource types are in the IAM Definition.
# Resource types unique to API Gateway V2:
resource_types = get_arn_types_for_service("apigateway")
resource_types = list(resource_types.keys())
self.assertTrue("AccessLogSettings" in resource_types)
# Resource types unique to API Gateway V1:
self.assertTrue("RestApi" in resource_types) | [
"def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)",
"def test_services_with_multiple_pages_ses(self):\n # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html\n self.assertTrue(\"ses:PutIdentityPolicy\" in self.all_actions)\n # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html\n self.assertTrue(\"ses:ListImportJobs\" in self.all_actions)\n\n results = get_actions_for_service(\"ses\")\n actions = [\n \"ses:CloneReceiptRuleSet\",\n \"ses:CreateConfigurationSetTrackingOptions\",\n \"ses:CreateReceiptFilter\",\n \"ses:CreateReceiptRule\",\n \"ses:CreateReceiptRuleSet\",\n \"ses:CreateTemplate\",\n \"ses:DeleteConfigurationSetTrackingOptions\",\n \"ses:DeleteIdentity\",\n \"ses:DeleteIdentityPolicy\",\n \"ses:DeleteReceiptFilter\",\n \"ses:DeleteReceiptRule\",\n \"ses:DeleteReceiptRuleSet\",\n \"ses:DeleteTemplate\",\n \"ses:DeleteVerifiedEmailAddress\",\n \"ses:DescribeActiveReceiptRuleSet\",\n \"ses:DescribeConfigurationSet\",\n \"ses:DescribeReceiptRule\",\n \"ses:DescribeReceiptRuleSet\",\n \"ses:GetAccountSendingEnabled\",\n \"ses:GetIdentityDkimAttributes\",\n \"ses:GetIdentityMailFromDomainAttributes\",\n \"ses:GetIdentityNotificationAttributes\",\n \"ses:GetIdentityPolicies\",\n \"ses:GetIdentityVerificationAttributes\",\n \"ses:GetSendQuota\",\n \"ses:GetSendStatistics\",\n \"ses:GetTemplate\",\n \"ses:ListIdentities\",\n \"ses:ListIdentityPolicies\",\n \"ses:ListReceiptFilters\",\n \"ses:ListReceiptRuleSets\",\n \"ses:ListTemplates\",\n \"ses:ListVerifiedEmailAddresses\",\n \"ses:PutIdentityPolicy\",\n \"ses:ReorderReceiptRuleSet\",\n \"ses:SendBounce\",\n \"ses:SendBulkTemplatedEmail\",\n \"ses:SendRawEmail\",\n \"ses:SendTemplatedEmail\",\n \"ses:SetActiveReceiptRuleSet\",\n \"ses:SetIdentityDkimEnabled\",\n \"ses:SetIdentityFeedbackForwardingEnabled\",\n \"ses:SetIdentityHeadersInNotificationsEnabled\",\n \"ses:SetIdentityMailFromDomain\",\n \"ses:SetIdentityNotificationTopic\",\n \"ses:SetReceiptRulePosition\",\n \"ses:TestRenderTemplate\",\n \"ses:UpdateAccountSendingEnabled\",\n \"ses:UpdateConfigurationSetReputationMetricsEnabled\",\n \"ses:UpdateConfigurationSetSendingEnabled\",\n \"ses:UpdateConfigurationSetTrackingOptions\",\n \"ses:UpdateReceiptRule\",\n \"ses:UpdateTemplate\",\n \"ses:VerifyDomainDkim\",\n \"ses:VerifyDomainIdentity\",\n \"ses:VerifyEmailAddress\",\n \"ses:VerifyEmailIdentity\",\n ]\n for action in actions:\n self.assertTrue(action in results)",
"def test_subscriber_access_for_two_vsg_services(self):",
"def test_fn_saintsxctf_com_api_authorizer_exists(self) -> None:\n api_id = APIGateway.rest_api_exists(self, self.api_name)\n authorizers = self.apigateway.get_authorizers(restApiId=api_id)\n authorizer_list: List[dict] = authorizers.get('items')\n self.assertEqual(1, len(authorizer_list))\n\n authorizer: dict = authorizer_list[0]\n self.assertEqual('saints-xctf-com-fn-auth', authorizer.get('name'))\n self.assertEqual('TOKEN', authorizer.get('type'))\n\n if self.prod_env:\n authorizer_name = 'function:SaintsXCTFAuthorizerPROD/invocations'\n else:\n authorizer_name = 'function:SaintsXCTFAuthorizerDEV/invocations'\n\n self.assertTrue(authorizer_name in authorizer.get('authorizerUri'))",
"def test_create_namespaced_ingress(self):\n pass",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def test_patch_namespaced_ingress(self):\n pass",
"def test_services_with_multiple_pages_aws_marketplace(self):\n # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems\n # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html\n self.assertTrue(\"aws-marketplace:AcceptAgreementApprovalRequest\" in self.all_actions)\n # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html\n self.assertTrue(\"aws-marketplace:CancelChangeSet\" in self.all_actions)\n # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html\n self.assertTrue(\"aws-marketplace:GetEntitlements\" in self.all_actions)\n # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html\n self.assertTrue(\"aws-marketplace:DescribeBuilds\" in self.all_actions)\n # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html\n self.assertTrue(\"aws-marketplace:BatchMeterUsage\" in self.all_actions)\n # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html\n self.assertTrue(\"aws-marketplace:AssociateProductsWithPrivateMarketplace\" in self.all_actions)\n # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html\n self.assertTrue(\"aws-marketplace:DescribeProcurementSystemConfiguration\" in self.all_actions)\n\n results = get_actions_for_service(\"aws-marketplace\")\n actions = [\n \"aws-marketplace:AcceptAgreementApprovalRequest\",\n \"aws-marketplace:BatchMeterUsage\",\n \"aws-marketplace:CancelAgreementRequest\",\n \"aws-marketplace:CancelChangeSet\",\n \"aws-marketplace:CompleteTask\",\n \"aws-marketplace:DescribeAgreement\",\n \"aws-marketplace:DescribeBuilds\",\n \"aws-marketplace:DescribeChangeSet\",\n \"aws-marketplace:DescribeEntity\",\n \"aws-marketplace:DescribeProcurementSystemConfiguration\",\n \"aws-marketplace:DescribeTask\",\n \"aws-marketplace:GetAgreementApprovalRequest\",\n \"aws-marketplace:GetAgreementRequest\",\n \"aws-marketplace:GetAgreementTerms\",\n \"aws-marketplace:GetEntitlements\",\n \"aws-marketplace:ListAgreementApprovalRequests\",\n \"aws-marketplace:ListAgreementRequests\",\n \"aws-marketplace:ListBuilds\",\n \"aws-marketplace:ListChangeSets\",\n \"aws-marketplace:ListEntities\",\n \"aws-marketplace:ListTasks\",\n \"aws-marketplace:MeterUsage\",\n \"aws-marketplace:PutProcurementSystemConfiguration\",\n \"aws-marketplace:RegisterUsage\",\n \"aws-marketplace:RejectAgreementApprovalRequest\",\n \"aws-marketplace:ResolveCustomer\",\n \"aws-marketplace:SearchAgreements\",\n \"aws-marketplace:StartBuild\",\n \"aws-marketplace:StartChangeSet\",\n \"aws-marketplace:Subscribe\",\n \"aws-marketplace:Unsubscribe\",\n \"aws-marketplace:UpdateAgreementApprovalRequest\",\n \"aws-marketplace:UpdateTask\",\n \"aws-marketplace:ViewSubscriptions\",\n ]\n for action in actions:\n self.assertTrue(action in results)",
"def test_replace_namespaced_ingress(self):\n pass",
"def test_subscriber_access_if_vsg2_goes_down(self):",
"def test_aws_service_api_validate_subscription_post(self):\n pass",
"def test_delete_namespaced_ingress(self):\n pass",
"def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies for '{account_id} ({account_name})'\")",
"def init_amazon_payments(self):\n try:\n self.session = self.request.basket.amazonpaymentssession\n except (AmazonPaymentsSession.DoesNotExist, AttributeError):\n return False\n logger.debug(\"Amazon Billing Agreement ID: %s\" % (\n self.session.billing_agreement_id))\n self.api = AmazonPaymentsAPI(\n settings.AMAZON_PAYMENTS_ACCESS_KEY,\n settings.AMAZON_PAYMENTS_SECRET_KEY,\n settings.AMAZON_PAYMENTS_SELLER_ID,\n settings.AMAZON_PAYMENTS_API_ENDPOINT,\n settings.AMAZON_PAYMENTS_API_VERSION,\n settings.AMAZON_PAYMENTS_IS_LIVE,\n )\n return True",
"def test_fn_saintsxctf_com_api_exists(self) -> None:\n APIGateway.rest_api_exists(self, self.api_name)",
"def validate_auto_deploy(events: dict, context: dict) -> bool:\n if 'HttpWsApiGwId' not in events:\n raise KeyError('Requires HttpWsApiGwId in events')\n\n if 'HttpWsStageName' not in events:\n raise KeyError('Requires HttpWsStageName in events')\n\n gateway_id: str = events['HttpWsApiGwId']\n stage_name: str = events['HttpWsStageName']\n\n response = get_stage(gateway_id, stage_name)\n\n if 'AutoDeploy' in response and response['AutoDeploy']:\n raise ValueError('AutoDeploy must be turned off to update deployment manually')\n return True",
"def is_action_valid(self, method, action):\n if method in self.api_methods:\n if action in self.api_methods[method]:\n if getattr(self, action, None):\n return True\n\n return False",
"def test_connect_post_namespaced_status_webhooks(self):\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
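Each row in this dump pairs a natural-language requirement (query) with the test that satisfies it (document), a list of hard negatives, and a metadata object whose objective field marks the row as a (query, document, negatives) triplet. As a hedged illustration only, the sketch below shows one way such rows could be turned into training pairs; it assumes the rows are materialized as JSON objects with exactly those four keys, and the file name and helper name are hypothetical rather than part of the dataset.

```python
# Minimal sketch, not part of the dataset: assumes each row is a JSON object
# with the four keys visible in the preview (query, document, negatives,
# metadata). The file name and helper name are hypothetical.
import json

def iter_triplets(path="triplets.jsonl"):
    """Yield (anchor, positive, negative) text tuples from a JSON-lines dump."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            anchor = row["query"]        # natural-language requirement
            positive = row["document"]   # the test that answers it
            for negative in row["negatives"]:
                yield anchor, positive, negative

# Usage (hypothetical file):
# pairs = list(iter_triplets("triplets.jsonl"))
# print(len(pairs), "training triplets")
```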
Ensure that awsmarketplace actions from all the different awsmarketplace SAR pages are present in the IAM definition. | def test_services_with_multiple_pages_aws_marketplace(self):
# Overlap: AWS Marketplace, Marketplace Catalog, AWS Marketplace Entitlement Service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems
# AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html
self.assertTrue("aws-marketplace:AcceptAgreementApprovalRequest" in self.all_actions)
# AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html
self.assertTrue("aws-marketplace:CancelChangeSet" in self.all_actions)
# AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html
self.assertTrue("aws-marketplace:GetEntitlements" in self.all_actions)
# AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html
self.assertTrue("aws-marketplace:DescribeBuilds" in self.all_actions)
# AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html
self.assertTrue("aws-marketplace:BatchMeterUsage" in self.all_actions)
# AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html
self.assertTrue("aws-marketplace:AssociateProductsWithPrivateMarketplace" in self.all_actions)
# AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html
self.assertTrue("aws-marketplace:DescribeProcurementSystemConfiguration" in self.all_actions)
results = get_actions_for_service("aws-marketplace")
actions = [
"aws-marketplace:AcceptAgreementApprovalRequest",
"aws-marketplace:BatchMeterUsage",
"aws-marketplace:CancelAgreementRequest",
"aws-marketplace:CancelChangeSet",
"aws-marketplace:CompleteTask",
"aws-marketplace:DescribeAgreement",
"aws-marketplace:DescribeBuilds",
"aws-marketplace:DescribeChangeSet",
"aws-marketplace:DescribeEntity",
"aws-marketplace:DescribeProcurementSystemConfiguration",
"aws-marketplace:DescribeTask",
"aws-marketplace:GetAgreementApprovalRequest",
"aws-marketplace:GetAgreementRequest",
"aws-marketplace:GetAgreementTerms",
"aws-marketplace:GetEntitlements",
"aws-marketplace:ListAgreementApprovalRequests",
"aws-marketplace:ListAgreementRequests",
"aws-marketplace:ListBuilds",
"aws-marketplace:ListChangeSets",
"aws-marketplace:ListEntities",
"aws-marketplace:ListTasks",
"aws-marketplace:MeterUsage",
"aws-marketplace:PutProcurementSystemConfiguration",
"aws-marketplace:RegisterUsage",
"aws-marketplace:RejectAgreementApprovalRequest",
"aws-marketplace:ResolveCustomer",
"aws-marketplace:SearchAgreements",
"aws-marketplace:StartBuild",
"aws-marketplace:StartChangeSet",
"aws-marketplace:Subscribe",
"aws-marketplace:Unsubscribe",
"aws-marketplace:UpdateAgreementApprovalRequest",
"aws-marketplace:UpdateTask",
"aws-marketplace:ViewSubscriptions",
]
for action in actions:
self.assertTrue(action in results) | [
"def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def test_services_with_multiple_pages_apigateway(self):\n # API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html\n self.assertTrue(\"apigateway:AddCertificateToDomain\" in self.all_actions)\n self.assertTrue(\"apigateway:RemoveCertificateFromDomain\" in self.all_actions)\n self.assertTrue(\"apigateway:SetWebACL\" in self.all_actions)\n # API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html\n # API Gateway V2 doesn't have any unique actions in but it does have some unique resource types. Let's make sure those resource types are in the IAM Definition.\n # Resource types unique to API Gateway V2:\n resource_types = get_arn_types_for_service(\"apigateway\")\n resource_types = list(resource_types.keys())\n self.assertTrue(\"AccessLogSettings\" in resource_types)\n # Resource types unique to API Gateway V1:\n self.assertTrue(\"RestApi\" in resource_types)",
"def test_excluded_actions_scan_policy_file(self):\n test_policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"iam:CreateAccessKey\"\n ],\n \"Resource\": \"*\"\n },\n ]\n }\n results = scan_policy(test_policy)\n expected_results = {\n \"ServicesAffected\": [\n \"iam\",\n \"s3\"\n ],\n \"PrivilegeEscalation\": {\n \"severity\": \"high\",\n \"description\": \"<p>These policies allow a combination of IAM actions that allow a principal with these permissions to escalate their privileges - for example, by creating an access key for another IAM user, or modifying their own permissions. This research was pioneered by Spencer Gietzen at Rhino Security Labs. Remediation guidance can be found <a href=\\\"https://rhinosecuritylabs.com/aws/aws-privilege-escalation-methods-mitigation/\\\">here</a>.</p>\",\n \"findings\": [\n {\n \"type\": \"CreateAccessKey\",\n \"actions\": [\n \"iam:createaccesskey\"\n ]\n }\n ]},\n \"ResourceExposure\": {\n \"severity\": \"high\",\n \"description\": \"<p>Resource Exposure actions allow modification of Permissions to <a href=\\\"https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html\\\">resource-based policies</a> or otherwise can expose AWS resources to the public via similar actions that can lead to resource exposure - for example, the ability to modify <a href=\\\"https://docs.aws.amazon.com/ram/latest/userguide/what-is.html\\\">AWS Resource Access Manager</a>.</p>\",\n \"findings\": [\n \"iam:CreateAccessKey\"\n ]},\n \"DataExfiltration\": {\n \"severity\": \"medium\",\n \"description\": \"<div style=\\\"text-align:left\\\"><p>Policies with Data Exfiltration potential allow certain read-only IAM actions without resource constraints, such as <code>s3:GetObject</code>, <code>ssm:GetParameter*</code>, or <code>secretsmanager:GetSecretValue</code>. <br> <ul> <li>Unrestricted <code>s3:GetObject</code> permissions has a long history of customer data leaks.</li> <li><code>ssm:GetParameter*</code> and <code>secretsmanager:GetSecretValue</code> are both used to access secrets.</li> <li><code>rds:CopyDBSnapshot</code> and <code>rds:CreateDBSnapshot</code> can be used to exfiltrate RDS database contents.</li> </ul></p></div>\",\n \"findings\":[\n \"s3:GetObject\"\n ]},\n \"ServiceWildcard\": {\n \"severity\": \"medium\",\n \"description\": \"<p>\\\"Service Wildcard\\\" is the unofficial way of referring to IAM policy statements that grant access to ALL actions under a service - like s3:*. Prioritizing the remediation of policies with this characteristic can help to efficiently reduce the total count of issues in the Cloudsplaining report.</p>\",\n \"findings\": []\n },\n \"CredentialsExposure\": {\n \"severity\": \"high\",\n \"description\": \"<p>Credentials Exposure actions return credentials as part of the API response , such as ecr:GetAuthorizationToken, iam:UpdateAccessKey, and others. The full list is maintained here: https://gist.github.com/kmcquade/33860a617e651104d243c324ddf7992a</p>\",\n \"findings\": [\n \"iam:CreateAccessKey\"\n ]},\n \"InfrastructureModification\": {\n \"severity\": \"low\",\n \"description\": \"\",\n \"findings\":[\n \"iam:CreateAccessKey\",\n \"s3:GetObject\"\n ]}\n }\n # print(json.dumps(results, indent=4))\n self.maxDiff = None\n self.assertDictEqual(results, expected_results)",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_template_action_spaces(self):\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass",
"def test_get_actions_with_arn_type_and_access_level_case_4(self):\n desired_output = [\n 'secretsmanager:ListSecrets'\n ]\n output = get_actions_with_arn_type_and_access_level(\n \"secretsmanager\", \"*\", \"List\"\n )\n self.assertListEqual(desired_output, output)",
"def test_search_workflow_step_permission(self):\n pass",
"def process_list_of_actions(self, supplied_actions, db_session):\n arns_matching_supplied_actions = []\n # query the database for corresponding ARNs and add them to arns_matching_supplied_actions\n for action in supplied_actions:\n action_name = get_action_name_from_action(action)\n service_name = get_service_from_action(action)\n for row in db_session.query(ActionTable).filter(and_(ActionTable.service.like(service_name),\n ActionTable.name.like(action_name))):\n if row.resource_arn_format not in arns_matching_supplied_actions:\n arns_matching_supplied_actions.append(\n [row.resource_arn_format, row.access_level, str(row.service + ':' + row.name)])\n # Identify the actions that require wildcard ONLY - i.e., they do not permit use of resource ARNs\n # If that's the case, add it to the wildcard namespace. Otherwise, don't add it.\n actions_with_wildcard = []\n for i in range(len(arns_matching_supplied_actions)):\n if '*' not in arns_matching_supplied_actions[i][0]:\n self.add(db_session, [arns_matching_supplied_actions[i][0]],\n arns_matching_supplied_actions[i][1])\n else:\n actions_with_wildcard.append(\n arns_matching_supplied_actions[i][2])\n\n self.update_actions_for_raw_arn_format(db_session)\n # Remove actions from the collection that have the same CRUD level but\n # were not requested by the user\n self.remove_actions_not_matching_list(supplied_actions)\n # If the action exists in the wildcard list,\n # let's remove it from the collection so we don't have actions across\n # both\n actions_with_wildcard_placeholder = []\n for action in range(len(actions_with_wildcard)):\n if self.does_action_exist(actions_with_wildcard[action]):\n pass\n else:\n actions_with_wildcard_placeholder.append(\n actions_with_wildcard[action])\n\n actions_with_wildcard.clear()\n actions_with_wildcard.extend(actions_with_wildcard_placeholder)\n self.combine_policy_elements()\n self.remove_actions_duplicated_in_wildcard_resources()\n\n # If the wildcard list is not empty\n if len(actions_with_wildcard) > 0:\n self.add_complete_entry(\n '*', 'Mult', 'Mult', '*', actions_with_wildcard)\n # NOTE avoid final and other qualifiers IMHO\n arn_dict = self.get_policy_elements(db_session)\n return arn_dict",
"def allow_action(self, action):\n aset = set([k for k in action.attrs.keys() if k.startswith(\"variant.\")])\n\n unknown_variants = aset - self.__keyset\n\n # handle variant.debug\n\n for u in unknown_variants:\n # install only unknown variant.debug\n # actions tagged w/ \"false\"\n if u.startswith(\"variant.debug.\") and \\\n action.attrs[u] != \"false\":\n return False\n # could assert here for other\n # unknown variants... best course TBD\n for a in aset & self.__keyset:\n if self[a] != action.attrs[a]:\n return False\n return True",
"def test_aria_allowed_attr(self):\n assert test_results.get('aria-allowed-attr') is None, test_results['aria-allowed-attr'].help",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_deployment_preview_action_spaces(self):\n pass",
"def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())",
"def test_actions_not_present(self):\n response = self.client.get(self.preview_url)\n\n soup = BeautifulSoup(str(response.content), features=\"lxml\")\n actions = soup.find(class_=\"column-list_actions\")\n\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, \"Actions\")\n self.assertIsNone(actions)",
"def test_services_with_multiple_pages_kinesis_analytics(self):\n # Kinesis Analytics V1\n results = get_actions_for_service(\"kinesisanalytics\")\n actions = [\n \"kinesisanalytics:GetApplicationState\", # Only in v1, not v2\n \"kinesisanalytics:ListApplications\", # In both\n ]\n for action in actions:\n self.assertTrue(action in results)",
"def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)",
"def get_legal_actions(self, block_=None):\n return self._get_move_actions(block_) + self._get_mine_actions() + \\\n self._get_placement_actions(block_)",
"def test_no_amo_installs_allowed_from(self):\n # self.data does not include a marketplace URL by default.\n self.listed = True\n self.analyze()\n self.assert_failed(with_errors=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
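The documents above all end with the same membership loop, `for action in actions: self.assertTrue(action in results)`, which fails on the first missing action. As a design note, a set-difference check reports every missing action in one failure message; the sketch below illustrates that variant, reusing the get_actions_for_service helper the tests already call (the surrounding suite appears to take it from policy_sentry, so the import path is a best guess rather than a quote from the source).

```python
# Sketch of an alternative assertion style; the import path is an assumption
# (adjust it to wherever the surrounding tests import the helper from).
from policy_sentry.querying.actions import get_actions_for_service

def assert_actions_present(test_case, service, expected_actions):
    """Fail once, listing every expected action missing from the service."""
    results = set(get_actions_for_service(service))
    missing = sorted(set(expected_actions) - results)
    test_case.assertEqual([], missing, f"Missing {service} actions: {missing}")

# Inside a unittest method, e.g. for the aws-marketplace list above:
# assert_actions_present(self, "aws-marketplace", actions)
```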
Ensure that greengrass v1 and greengrass v2 actions are both present in the greengrass namespace | def test_services_with_multiple_pages_greengrass(self):
# Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html
self.assertTrue("greengrass:CreateResourceDefinition" in self.all_actions)
# Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html
self.assertTrue("greengrass:CreateComponentVersion" in self.all_actions)
results = get_actions_for_service("greengrass")
actions = [
"greengrass:AssociateRoleToGroup",
"greengrass:CreateConnectorDefinition",
"greengrass:CreateConnectorDefinitionVersion",
"greengrass:CreateCoreDefinition",
"greengrass:CreateCoreDefinitionVersion",
"greengrass:CreateDeviceDefinition",
"greengrass:CreateDeviceDefinitionVersion",
"greengrass:CreateFunctionDefinition",
"greengrass:CreateFunctionDefinitionVersion",
"greengrass:CreateGroup",
"greengrass:CreateGroupCertificateAuthority",
"greengrass:CreateGroupVersion",
"greengrass:CreateLoggerDefinition",
"greengrass:CreateLoggerDefinitionVersion",
"greengrass:CreateResourceDefinition",
"greengrass:CreateResourceDefinitionVersion",
"greengrass:CreateSoftwareUpdateJob",
"greengrass:CreateSubscriptionDefinition",
"greengrass:CreateSubscriptionDefinitionVersion",
"greengrass:DeleteConnectorDefinition",
"greengrass:DeleteCoreDefinition",
"greengrass:DeleteDeviceDefinition",
"greengrass:DeleteFunctionDefinition",
"greengrass:DeleteGroup",
"greengrass:DeleteLoggerDefinition",
"greengrass:DeleteResourceDefinition",
"greengrass:DeleteSubscriptionDefinition",
"greengrass:DisassociateRoleFromGroup",
"greengrass:Discover",
"greengrass:GetAssociatedRole",
"greengrass:GetBulkDeploymentStatus",
"greengrass:GetConnectorDefinition",
"greengrass:GetConnectorDefinitionVersion",
"greengrass:GetCoreDefinition",
"greengrass:GetCoreDefinitionVersion",
"greengrass:GetDeploymentStatus",
"greengrass:GetDeviceDefinition",
"greengrass:GetDeviceDefinitionVersion",
"greengrass:GetFunctionDefinition",
"greengrass:GetFunctionDefinitionVersion",
"greengrass:GetGroup",
"greengrass:GetGroupCertificateAuthority",
"greengrass:GetGroupCertificateConfiguration",
"greengrass:GetGroupVersion",
"greengrass:GetLoggerDefinition",
"greengrass:GetLoggerDefinitionVersion",
"greengrass:GetResourceDefinition",
"greengrass:GetResourceDefinitionVersion",
"greengrass:GetSubscriptionDefinition",
"greengrass:GetSubscriptionDefinitionVersion",
"greengrass:GetThingRuntimeConfiguration",
"greengrass:ListBulkDeploymentDetailedReports",
"greengrass:ListBulkDeployments",
"greengrass:ListConnectorDefinitionVersions",
"greengrass:ListConnectorDefinitions",
"greengrass:ListCoreDefinitionVersions",
"greengrass:ListCoreDefinitions",
"greengrass:ListDeviceDefinitionVersions",
"greengrass:ListDeviceDefinitions",
"greengrass:ListFunctionDefinitionVersions",
"greengrass:ListFunctionDefinitions",
"greengrass:ListGroupCertificateAuthorities",
"greengrass:ListGroupVersions",
"greengrass:ListGroups",
"greengrass:ListLoggerDefinitionVersions",
"greengrass:ListLoggerDefinitions",
"greengrass:ListResourceDefinitionVersions",
"greengrass:ListResourceDefinitions",
"greengrass:ListSubscriptionDefinitionVersions",
"greengrass:ListSubscriptionDefinitions",
"greengrass:ResetDeployments",
"greengrass:StartBulkDeployment",
"greengrass:StopBulkDeployment",
"greengrass:UpdateConnectorDefinition",
"greengrass:UpdateCoreDefinition",
"greengrass:UpdateDeviceDefinition",
"greengrass:UpdateFunctionDefinition",
"greengrass:UpdateGroup",
"greengrass:UpdateGroupCertificateConfiguration",
"greengrass:UpdateLoggerDefinition",
"greengrass:UpdateResourceDefinition",
"greengrass:UpdateSubscriptionDefinition",
"greengrass:UpdateThingRuntimeConfiguration"
]
for action in actions:
self.assertTrue(action in results)
# if action not in results:
# print(action) | [
"def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)",
"def has_action2(self, feature):\n return feature in self._action2",
"def gold_action(self):\n raise RuntimeError(\"Gold actions not defined for this navigator!\")",
"def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"",
"def test_check_push_rules_actions(self) -> None:\n with self.assertRaises(InvalidRuleException):\n self.module_api.check_push_rule_actions([\"foo\"])\n\n with self.assertRaises(InvalidRuleException):\n self.module_api.check_push_rule_actions([{\"foo\": \"bar\"}])\n\n self.module_api.check_push_rule_actions([\"notify\"])\n\n self.module_api.check_push_rule_actions(\n [{\"set_tweak\": \"sound\", \"value\": \"default\"}]\n )",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def test_10_unsupported_actions(self):\n\n def __count_pulled_packages(pth):\n self.pkgrepo(\"list -F tsv -H -s {0}\".format(pth))\n return len(self.output.splitlines())\n\n def __check_errout(pfmri):\n s1 = \"invalid action in package {0}\".format(pfmri)\n s2 = \"Malformed action in package '{0}'\".format(pfmri)\n self.assert_(s1 in self.errout or s2 in self.errout,\n \"{0} not in error\".format(pfmri))\n\n def __empty_repo(uri, arg_string):\n if uri.startswith(\"http://\"):\n rurl = self.dcs[4].get_repo_url()\n self.pkgrepo(\"remove -s {0} '*'\".format(rurl))\n # Refresh the depot to get it to realize that\n # the catalog has changed.\n self.dcs[4].refresh()\n elif arg_string:\n portable.remove(uri)\n else:\n self.pkgrepo(\"remove -s {0} '*'\".format(uri))\n\n\n def __test_rec(duri, arg_string, pfmris):\n self.debug(\"\\n\\nNow pkgrecv'ing to {0}\".format(duri))\n\n # It's necessary to use the -D option below because\n # otherwise pkgrecv will fail because the manifest\n # doesn't validate.\n\n novalidate = \"-D manifest_validate=Never \"\n # Check that invalid action attributes don't cause\n # tracebacks.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n # Check that other packages are retrieved and the exit\n # code reflects partial success.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.rurl1 = self.dcs[1].get_repo_url()\n repo = self.dcs[1].get_repo()\n rd = repo.get_pub_rstore()\n pfmri = fmri.PkgFmri(self.published[4])\n mp = rd.manifest(pfmri)\n\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = original_txt.replace(\"type=require\", \"type=foo\")\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n rpth = tempfile.mkdtemp(dir=self.test_root)\n self.pkgrepo(\"create {0}\".format(rpth))\n adir = tempfile.mkdtemp(dir=self.test_root)\n\n # The __empty repo function above assumes that the only http uri\n # used is the one for depot number 4.\n dest_uris = ((rpth, \"\"), (self.durl4, \"\"),\n (os.path.join(adir, \"archive.p5p\"), \"-a\"))\n for duri, arg_string in dest_uris:\n __test_rec(duri, arg_string, [self.published[4]])\n\n # Test that multiple packages failing are handled correctly.\n for i in range(5, 7):\n pfmri = fmri.PkgFmri(self.published[i])\n mp = rd.manifest(pfmri)\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = \"foop\\n\" + original_txt\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n for duri, arg_string, in dest_uris:\n __test_rec(duri, arg_string, self.published[4:7])",
"def p_governance_events(params, _1, _2, state):\n events: dict = params['governance_events']\n t = state['timestep']\n\n action = {}\n if t in events.keys():\n event = events[t]\n action = decode_event(event)\n else:\n pass\n\n return action",
"def check_type(self):\n if self.action < 0 or self.action >= len(_action_args_dict):\n raise GameActionError('Invalid action type ({0})'.format(self.action))",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_by_project_and_version_responder_spaces(self):\n pass",
"def checkNamespaces(self) :\n nsp = [x[\"namespace\"] for x in self.GO]\n lnsp = [len(x) for x in nsp]\n lnsp = set(lnsp)\n if (not lnsp == set([1])) :\n raise GOparserError(\"GO term namespace error (len!=1)\")\n nsp = [x[0] for x in nsp]\n nsp = set(nsp)\n if (not nsp == set([\"cellular_component\", \"biological_process\",\n \"molecular_function\"])) :\n raise GOparserError((\"Improper GO term namespaces: \" +\n repr(list(nsp))))",
"def validate_virtual_server_group(self, args: dict[str, Any], action: str):\n pass",
"def check(self):\n # get the data from shotgun\n app = self.parent.app\n context = app.context\n # get asset type\n filters = [[\"id\", \"is\", context.entity[\"id\"]]]\n fields = [\"sg_asset_type\"]\n assetType = app.shotgun.find_one(\n \"Asset\", filters=filters, fields=fields)[\"sg_asset_type\"]\n # get step short name\n filters = [[\"id\", \"is\", context.step[\"id\"]]]\n fields = [\"short_name\"]\n stepShortName = app.shotgun.find_one(\n \"Step\", filters=filters, fields=fields)[\"short_name\"]\n\n try:\n assetNode = gNodes.getTopGNode()\n except:\n assetNode = None\n\n if assetNode:\n metadataCode = assetNode.grid_code.get()\n metadataAssetType = assetNode.grid_type.get(asString=True)\n metadataPipeStep = assetNode.grid_pipeStep.get(asString=True)\n if not (assetType == metadataAssetType and\n stepShortName == metadataPipeStep and\n context.entity[\"name\"] == metadataCode):\n self.status = self.errorMode\n self.addError(\"Context and asset node metadata don't match\")\n self.errorMessage = \"Context and asset node metadata don't match\"\n else:\n self.status = \"OK\"\n else:\n self.status = \"OK\"",
"def test_lti20_rest_good_dispatch(self):\r\n for ginput, expected in self.GOOD_DISPATCH_INPUTS:\r\n self.assertEquals(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)",
"def validate_action_space(self):\n valid_action_space = True\n if self.action_space_type == constants.ActionSpaceTypes.DISCRETE:\n for action_dict in self.action_space:\n if constants.ModelMetadataKeys.STEERING not in action_dict \\\n or constants.ModelMetadataKeys.SPEED not in action_dict:\n valid_action_space = False\n break\n elif self.action_space_type == constants.ActionSpaceTypes.CONTINUOUS:\n if constants.ModelMetadataKeys.STEERING not in self.action_space \\\n or constants.ModelMetadataKeys.SPEED not in self.action_space:\n valid_action_space = False\n else:\n steering_action_dict = self.action_space[constants.ModelMetadataKeys.STEERING]\n speed_action_dict = self.action_space[constants.ModelMetadataKeys.SPEED]\n if (constants.ModelMetadataKeys.CONTINUOUS_HIGH not in steering_action_dict\n or constants.ModelMetadataKeys.CONTINUOUS_LOW not in steering_action_dict\n or constants.ModelMetadataKeys.CONTINUOUS_HIGH not in speed_action_dict\n or constants.ModelMetadataKeys.CONTINUOUS_LOW not in speed_action_dict\n or steering_action_dict[constants.ModelMetadataKeys.CONTINUOUS_HIGH]\n <= steering_action_dict[constants.ModelMetadataKeys.CONTINUOUS_LOW]\n or speed_action_dict[constants.ModelMetadataKeys.CONTINUOUS_HIGH]\n <= speed_action_dict[constants.ModelMetadataKeys.CONTINUOUS_LOW]):\n valid_action_space = False\n if not valid_action_space:\n raise Exception(f\"Incorrect action space values: {self.action_space}\")",
"def check_if_can_evolve(self):\n # This sounds similar to generate actions\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass",
"def test_action_already_visited_target(self):\n\n game = GameTestHelper.create_start_ready_game()\n game.start()\n\n game.add_resident(Roles.SEER)\n\n seer = Seer.objects.filter(\n game=game, role__role=Roles.SEER.value\n ).first()\n\n villager = game.residents.filter(\n role__role=Roles.VILLAGER.value\n ).first()\n villager.hut.is_visited = True\n\n with self.assertRaises(APIException) as ex:\n seer.action(player=game.owner, target_hut=villager.hut)\n\n self.assertEquals(\n ex.exception.code, APIExceptionCode.ACTION_INVALID_TARGET\n )",
"def allow_action(self, action):\n aset = set([k for k in action.attrs.keys() if k.startswith(\"variant.\")])\n\n unknown_variants = aset - self.__keyset\n\n # handle variant.debug\n\n for u in unknown_variants:\n # install only unknown variant.debug\n # actions tagged w/ \"false\"\n if u.startswith(\"variant.debug.\") and \\\n action.attrs[u] != \"false\":\n return False\n # could assert here for other\n # unknown variants... best course TBD\n for a in aset & self.__keyset:\n if self[a] != action.attrs[a]:\n return False\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
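The queries in these rows all instantiate one template: a service whose SAR documentation spans multiple pages must expose one action from each page under a single IAM namespace. The parametrized sketch below generalizes that template using only (service, action) pairs already named in the rows above (greengrass and ses); it is an illustration, with the pytest parametrization and the policy_sentry import being assumptions rather than part of the original suite.

```python
# Sketch of a parametrized form of the multi-page checks shown above.
# The (service, v1 action, v2 action) pairs are taken from the rows in this
# dump; the import path is assumed -- swap it for the real test suite's.
import pytest
from policy_sentry.querying.actions import get_actions_for_service

@pytest.mark.parametrize(
    "service, v1_action, v2_action",
    [
        ("greengrass", "greengrass:CreateResourceDefinition", "greengrass:CreateComponentVersion"),
        ("ses", "ses:PutIdentityPolicy", "ses:ListImportJobs"),
    ],
)
def test_multi_page_service_actions(service, v1_action, v2_action):
    results = get_actions_for_service(service)
    assert v1_action in results, f"{v1_action} missing from {service}"
    assert v2_action in results, f"{v2_action} missing from {service}"
```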
Ensure that elb v1 and elb v2 actions are both present in the elasticloadbalancing namespace | def test_services_with_multiple_pages_elb(self):
results = get_actions_for_service("elasticloadbalancing")
actions = [
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateAppCookieStickinessPolicy",
"elasticloadbalancing:CreateLBCookieStickinessPolicy",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteLoadBalancerPolicy",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancerPolicyTypes",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer",
"elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:SetLoadBalancerListenerSSLCertificate",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener",
]
for action in actions:
self.assertTrue(action in results) | [
"def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def check_elb_cross_zone(session, elb):\n name = elb['LoadBalancerName']\n enabled = get_elb_cross_zone_attr(session, name)\n status = \"OK\" if enabled else \"NOK\"\n return name, status",
"def test_match_endpoints():\n\n service_names = [\n \"iap-ingress-kfctl-8c9b.endpoints.kubeflow-ci-deployment.cloud.goog\",\n ]\n\n for s in service_names:\n assert cleanup_ci.is_match(s, patterns=cleanup_ci.E2E_PATTERNS)",
"def find_elbv2s(instance_id):\n print(yellow(\"Searching for v2 ELBs associated with instance {}.\".format(\n instance_id)))\n v2 = client('elbv2')\n elb_names = set()\n r1 = v2.describe_load_balancers()\n for lb in r1['LoadBalancers']:\n r2 = v2.describe_target_groups(LoadBalancerArn=lb['LoadBalancerArn'])\n for tg in r2['TargetGroups']:\n r3 = v2.describe_target_health(TargetGroupArn=tg['TargetGroupArn'])\n for thd in r3['TargetHealthDescriptions']:\n if thd['Target']['Id'] == instance_id:\n print(green(\"Found ELB named {}.\".format(\n lb['LoadBalancerName'])))\n elb_names.add(lb['LoadBalancerName'])\n if elb_names:\n return elb_names\n else:\n print(green(\"Could not find any v2 ELBs associated with instance {}.\".\n format(instance_id)))",
"def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"",
"def test_box_actions_out_of_bound(env: gym.Env):\n env.reset(seed=42)\n\n oob_env = gym.make(env.spec.id, disable_env_checker=True)\n oob_env.reset(seed=42)\n\n assert isinstance(env.action_space, spaces.Box)\n dtype = env.action_space.dtype\n upper_bounds = env.action_space.high\n lower_bounds = env.action_space.low\n\n for i, (is_upper_bound, is_lower_bound) in enumerate(\n zip(env.action_space.bounded_above, env.action_space.bounded_below)\n ):\n if is_upper_bound:\n obs, _, _, _, _ = env.step(upper_bounds)\n oob_action = upper_bounds.copy()\n oob_action[i] += np.cast[dtype](OOB_VALUE)\n\n assert oob_action[i] > upper_bounds[i]\n oob_obs, _, _, _, _ = oob_env.step(oob_action)\n\n assert np.alltrue(obs == oob_obs)\n\n if is_lower_bound:\n obs, _, _, _, _ = env.step(\n lower_bounds\n ) # `env` is unwrapped, and in new step API\n oob_action = lower_bounds.copy()\n oob_action[i] -= np.cast[dtype](OOB_VALUE)\n\n assert oob_action[i] < lower_bounds[i]\n oob_obs, _, _, _, _ = oob_env.step(oob_action)\n\n assert np.alltrue(obs == oob_obs)\n\n env.close()",
"def check_action_intersect(action_1, action_2):\n if action_1 == action_2:\n return True\n raise NotImplementedError(\"Not yet implemented, replaced pdb.set_trace\")",
"def check_elb_azs(session, elb):\n zones = elb['AvailabilityZones']\n name = elb['LoadBalancerName']\n status = \"OK\" if len(zones) > 1 else \"NOK\"\n return name, status",
"def cloud_control_create_ec2(event, context):\n\n msg = \"\"\n validate_with_context_payload = {\n \"LastInstanceName\": event[\"body\"][\"InstanceName\"],\n \"LastSubnetName\": event[\"body\"][\"SubnetName\"],\n \"LastKeyPairName\": event[\"body\"][\"KeyName\"],\n \"LastSecGroupName\": event[\"body\"][\"SecGroupName\"],\n \"LastInstanceType\": event[\"body\"][\"InstanceType\"]\n }\n response = {}\n response = validate_with_dynamo(validate_with_context_payload)\n payload_response = json.loads(response)\n ValidatedInstanceName = payload_response[\"LastInstanceName\"]\n ValidatedSubnetName = payload_response[\"LastSubnetName\"]\n ValidatedKeyPairName = payload_response[\"LastKeyPairName\"]\n ValidatedSecGroupName = payload_response[\"LastSecGroupName\"]\n ValidatedInstanceType = payload_response[\"LastInstanceType\"]\n # Validate instance name\n ec2_client = boto3.client('ec2')\n response = ec2_client.describe_instances(\n Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': [ValidatedInstanceName]\n }\n ]\n )\n instance_list = []\n for reservation in response['Reservations']:\n for instance in reservation['Instances']:\n instance_list.append(instance['InstanceId'])\n\n if instance_list:\n msg = \"Instance with name {} exists!\".format(ValidatedInstanceName)\n return {\"msg\": msg}\n\n# to refactor\n\n msg = \"Instance {} is created \".format(ValidatedInstanceName)\n #subnet_name = ValidatedSubnetName.lower()\n success_code, msg, subnet_id = ec2_find_subnet(ValidatedSubnetName.lower(), msg)\n if not success_code == 0:\n return {\"msg\": msg}\n\n success_code, msg, sg_id = ec2_find_sg(ValidatedSecGroupName, msg)\n if not success_code == 0:\n return {\"msg\": msg}\n\n success_code, msg, key_name = ec2_find_key(ValidatedKeyPairName, msg)\n if not success_code == 0:\n return {\"msg\": msg}\n\n # Prepare data\n # This should be improved.\n # It looks bad, but I do not have idea now, how to write it better.\n if not key_name == \"none\":\n response = ec2_client.run_instances(\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/xvda',\n 'Ebs': {\n\n 'DeleteOnTermination': True,\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n },\n ],\n ImageId='ami-030dbca661d402413',\n InstanceType=ValidatedInstanceType,\n KeyName=key_name,\n MaxCount=1,\n MinCount=1,\n Monitoring={\n 'Enabled': False\n },\n SecurityGroupIds=[\n sg_id,\n ],\n SubnetId=subnet_id,\n TagSpecifications=[\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': ValidatedInstanceName\n },\n ]\n },\n ]\n )\n else:\n response = ec2_client.run_instances(\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/xvda',\n 'Ebs': {\n\n 'DeleteOnTermination': True,\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n },\n ],\n ImageId='ami-030dbca661d402413',\n InstanceType=ValidatedInstanceType,\n MaxCount=1,\n MinCount=1,\n Monitoring={\n 'Enabled': False\n },\n SecurityGroupIds=[\n sg_id,\n ],\n SubnetId=subnet_id,\n TagSpecifications=[\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': ValidatedInstanceName\n },\n ]\n },\n ]\n )\n write_to_table_payload = {\n \"LastInstanceName\": ValidatedInstanceName,\n \"LastSubnetName\": ValidatedSubnetName,\n \"LastKeyPairName\": ValidatedKeyPairName,\n \"LastSecGroupName\": ValidatedSecGroupName,\n \"LastInstanceType\": ValidatedInstanceType\n }\n write_to_dynamo(write_to_table_payload)\n return {\"msg\": msg}",
"def test_mujoco_incompatible_v3_to_v2(env_name: str):\n with pytest.raises(KeyError):\n verify_environments_match(f\"{env_name}-v3\", f\"{env_name}-v2\")",
"def checkNamespaces(self) :\n nsp = [x[\"namespace\"] for x in self.GO]\n lnsp = [len(x) for x in nsp]\n lnsp = set(lnsp)\n if (not lnsp == set([1])) :\n raise GOparserError(\"GO term namespace error (len!=1)\")\n nsp = [x[0] for x in nsp]\n nsp = set(nsp)\n if (not nsp == set([\"cellular_component\", \"biological_process\",\n \"molecular_function\"])) :\n raise GOparserError((\"Improper GO term namespaces: \" +\n repr(list(nsp))))",
"def validate_op(self):\n self._validate_ad_state()\n self._validate_ad_compliance()\n self._validate_time_ranges()",
"def check_deployment(version_stack_name, app_name):\n\n print(\"Polling Target Group ({}) until a successful state is reached...\".format(version_stack_name))\n elbv2 = boto3.client('elbv2')\n waiter = elbv2.get_waiter('target_in_service')\n cloudformation = boto3.client('cloudformation')\n response = cloudformation.describe_stack_resources(\n StackName=version_stack_name,\n LogicalResourceId='ALBTargetGroup'\n )\n target_group = response['StackResources'][0]['PhysicalResourceId']\n start_time = datetime.datetime.now()\n try:\n waiter.wait(TargetGroupArn=target_group)\n except botocore.exceptions.WaiterError:\n print('Health check did not pass!')\n response = cloudformation.describe_stack_resources(\n StackName=version_stack_name,\n LogicalResourceId='ECSService'\n )\n service = response['StackResources'][0]['PhysicalResourceId']\n print('Outputting events for service {}:'.format(service))\n response = cloudformation.describe_stack_resources(\n StackName=\"ECS-{}\".format(app_name),\n LogicalResourceId='ECSCluster'\n )\n cluster = response['StackResources'][0]['PhysicalResourceId']\n ecs = boto3.client('ecs')\n response = ecs.describe_services(\n cluster=cluster,\n services=[service]\n )\n for event in [x['message'] for x in response['services'][0]['events']]:\n print(event)\n# print('Deleting CloudFormation stack...')\n# response = cloudformation.delete_stack(\n# StackName=\"MV-{realm}-{app_name}-{version}-{env}\".format(env=os.environ['ENV'], app_name=os.environ['ECS_APP_NAME'], version=os.environ['BUILD_VERSION'], realm=os.environ['REALM'])\n# )\n# waiter = cf.get_waiter('stack_delete_complete')\n# waiter.wait(\n# StackName=\"MV-{realm}-{app_name}-{version}-{env}\".format(env=os.environ['ENV'], app_name=os.environ['ECS_APP_NAME'], version=os.environ['BUILD_VERSION'], realm=os.environ['REALM'])\n# )\n# print('CloudFormation stack deleted.')\n elapsed_time = datetime.datetime.now() - start_time\n print('Health check passed in {}'.format(elapsed_time))\n print(\"Done.\")",
"def validate_virtual_server_group(self, args: dict[str, Any], action: str):\n pass",
"def test_redeploy_edges(self):\n pass",
"def validate_route_states(duthost, vrf=DEFAULT, check_point=QUEUED, action=ACTION_IN):\n for route in IP_ROUTE_LIST:\n check_route_install_status(duthost, route, vrf, IP_VER, check_point, action)\n for route in IPV6_ROUTE_LIST:\n check_route_install_status(duthost, route, vrf, IPV6_VER, check_point, action)",
"def test_01_internallb_rules(self):\n\n # 1. Create an Internal LB Rule with source IP Address specified, check if the Internal LB Rule is successfully\n # created.\n # 2. Create an Internal LB Rule without source IP Address specified, check if the Internal LB Rule is\n # successfully created.\n # 3. Create an Internal LB Rule when the specified source IP Address is outside the VPC network (tier) CIDR\n # range, check if the Internal LB Rule creation failed as the requested source IP is not in the network's\n # CIDR subnet.\n # 4. Create an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR range,\n # check if the Internal LB Rule creation failed as the requested source IP is not in the network's CIDR\n # subnet.\n # 5. Create an Internal LB Rule in the tier with LB service provider as VpcInlineLbVm, check if the Internal LB\n # Rule creation failed as Scheme Internal is not supported by this network offering.\n # 6. Create multiple Internal LB Rules using different Load Balancing source IP Addresses, check if the Internal\n # LB Rules are successfully created.\n # 7. Create multiple Internal LB Rules with different ports but using the same Load Balancing source IP Address,\n # check if the Internal LB Rules are successfully created.\n # 8. Create multiple Internal LB Rules with same ports and using the same Load Balancing source IP Address,\n # check if the second Internal LB Rule creation failed as it conflicts with the first Internal LB rule.\n # 9. Attach a VM to the above created Internal LB Rules, check if the VM is successfully attached to the\n # Internal LB Rules.\n # 10. Verify the InternalLbVm deployment after successfully creating the first Internal LB Rule and attaching a\n # VM to it.\n # 11. Verify the failure of attaching a VM from a different tier to an Internal LB Rule created on a tier.\n # 12. 
Delete the above created Internal LB Rules, check if the Internal LB Rules are successfully deleted.\n\n # Creating VPC networks in the VPC, and deploying VMs\n self.debug(\"Creating a VPC network with Internal LB service...\")\n internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1')\n\n self.debug(\"Deploying a VM in the created VPC network...\")\n internal_vm = self.create_VM(internal_tier)\n\n self.debug(\"Creating a VPC network without Internal LB service...\")\n public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1')\n\n self.debug(\"Deploying a VM in the created VPC network...\")\n public_vm = self.create_VM(public_tier)\n\n # Creating Internal LB Rules\n self.debug(\"Creating an Internal LB Rule without source IP Address specified...\")\n int_lb_rule = self.create_Internal_LB_Rule(internal_tier)\n self.validate_Internal_LB_Rule(int_lb_rule, state=\"Add\")\n\n # Validating InternalLbVm deployment\n with self.assertRaises(Exception):\n self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)\n self.debug(\"InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule\")\n\n self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)\n int_lb_rule.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n\n free_source_ip = int_lb_rule.sourceipaddress\n\n self.debug(\"Creating an Internal LB Rule with source IP Address specified...\")\n int_lb_rule = self.create_Internal_LB_Rule(internal_tier, source_ip=free_source_ip)\n self.validate_Internal_LB_Rule(int_lb_rule, state=\"Add\")\n\n # Validating InternalLbVm deployment\n with self.assertRaises(Exception):\n self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)\n self.debug(\"InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule\")\n\n self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)\n int_lb_rule.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n\n self.debug(\"Creating an Internal LB Rule when the specified source IP Address is outside the VPC network CIDR \"\n \"range...\")\n with self.assertRaises(Exception):\n self.create_Internal_LB_Rule(internal_tier, source_ip=\"10.1.1.256\")\n self.debug(\"Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet\")\n\n self.debug(\"Creating an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR \"\n \"range...\")\n with self.assertRaises(Exception):\n self.create_Internal_LB_Rule(internal_tier, source_ip=\"10.2.1.256\")\n self.debug(\"Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet\")\n\n self.debug(\"Creating an Internal LB Rule in a VPC network without Internal Lb service...\")\n with self.assertRaises(Exception):\n self.create_Internal_LB_Rule(public_tier)\n self.debug(\"Internal LB Rule creation failed as Scheme Internal is not supported by this network offering\")\n\n self.debug(\"Creating multiple Internal LB Rules using different Load Balancing source IP Addresses...\")\n int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])\n self.validate_Internal_LB_Rule(int_lb_rule_1, state=\"Active\", vm_array=[internal_vm])\n int_lb_rule_2 = 
self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])\n self.validate_Internal_LB_Rule(int_lb_rule_2, state=\"Active\", vm_array=[internal_vm])\n\n # Validating InternalLbVms deployment and state\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state=\"Running\")\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state=\"Running\")\n\n self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))\n int_lb_rule_1.remove(self.api_client, vms=[internal_vm])\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])\n self.debug(\"VMs successfully removed from the Internal LB Rule in CloudStack\")\n int_lb_rule_2.remove(self.api_client, vms=[internal_vm])\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])\n self.debug(\"VMs successfully removed from the Internal LB Rule in CloudStack\")\n\n # Validating InternalLbVms state\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state=\"Running\")\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state=\"Running\")\n\n self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))\n int_lb_rule_1.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_1)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n int_lb_rule_2.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_2)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n\n # Validating InternalLbVms un-deployment\n with self.assertRaises(Exception):\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)\n self.debug(\"InternalLbVm successfully destroyed in CloudStack\")\n with self.assertRaises(Exception):\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress)\n self.debug(\"InternalLbVm successfully destroyed in CloudStack\")\n\n self.debug(\"Creating multiple Internal LB Rules with different ports but using the same Load Balancing source \"\n \"IP Address...\")\n int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])\n self.validate_Internal_LB_Rule(int_lb_rule_1, state=\"Active\", vm_array=[internal_vm])\n int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,\n vm_array=[internal_vm],\n services=self.test_data[\"internal_lbrule_http\"],\n source_ip=int_lb_rule_1.sourceipaddress\n )\n self.validate_Internal_LB_Rule(int_lb_rule_2, state=\"Active\", vm_array=[internal_vm])\n\n # Validating InternalLbVm deployment and state\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state=\"Running\")\n\n self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))\n int_lb_rule_1.remove(self.api_client, vms=[internal_vm])\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])\n self.debug(\"VMs successfully removed from the Internal LB Rule in CloudStack\")\n int_lb_rule_2.remove(self.api_client, vms=[internal_vm])\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])\n self.debug(\"VMs successfully removed from the Internal LB Rule in CloudStack\")\n\n # Validating InternalLbVm state\n 
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state=\"Running\")\n\n self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))\n int_lb_rule_1.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_1)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n int_lb_rule_2.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule_2)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n\n # Validating InternalLbVm un-deployment\n with self.assertRaises(Exception):\n self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)\n self.debug(\"InternalLbVm successfully destroyed in CloudStack\")\n\n self.debug(\"Creating multiple Internal LB Rules with same ports and using the same Load Balancing source IP \"\n \"Address...\")\n int_lb_rule = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])\n self.validate_Internal_LB_Rule(int_lb_rule, state=\"Active\", vm_array=[internal_vm])\n with self.assertRaises(Exception):\n self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm], source_ip=int_lb_rule.sourceipaddress)\n self.debug(\"Internal LB Rule creation failed as it conflicts with the existing rule\")\n\n # Validating InternalLbVm deployment and state\n self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state=\"Running\")\n\n self.debug('Removing VMs from the Internal LB Rule - %s' % int_lb_rule.name)\n int_lb_rule.remove(self.api_client, vms=[internal_vm])\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm])\n self.debug(\"VMs successfully removed from the Internal LB Rule in CloudStack\")\n\n # Validating InternalLbVm state\n self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state=\"Running\")\n\n self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)\n int_lb_rule.delete(self.api_client)\n with self.assertRaises(Exception):\n self.validate_Internal_LB_Rule(int_lb_rule)\n self.debug(\"Internal LB Rule successfully deleted in CloudStack\")\n\n # Validating InternalLbVm un-deployment\n with self.assertRaises(Exception):\n self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)\n self.debug(\"InternalLbVm successfully destroyed in CloudStack\")\n\n self.debug(\"Attaching a VM from a different tier to an Internal LB Rule created on a tier...\")\n with self.assertRaises(Exception):\n self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm])\n self.debug(\"Internal LB Rule creation failed as the VM belongs to a different network\")",
"def validate_availability_zones(self, context, resource_type,\n availability_zones):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that lex v1 and lex v2 actions are both present in the lex namespace | def test_services_with_multiple_pages_lex(self):
# Lex V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlex.html
self.assertTrue("lex:DeleteUtterances" in self.all_actions)
# Lex V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlexv2.html
self.assertTrue("lex:ListBotLocales" in self.all_actions)
results = get_actions_for_service("lex")
actions = [
"lex:CreateIntentVersion",
"lex:CreateSlotTypeVersion",
"lex:DeleteBotChannelAssociation",
"lex:DeleteIntentVersion",
"lex:DeleteSlotTypeVersion",
"lex:GetBot",
"lex:GetBotAlias",
"lex:GetBotAliases",
"lex:GetBotChannelAssociation",
"lex:GetBotChannelAssociations",
"lex:GetBotVersions",
"lex:GetBots",
"lex:GetBuiltinIntent",
"lex:GetBuiltinIntents",
"lex:GetBuiltinSlotTypes",
"lex:GetExport",
"lex:GetImport",
"lex:GetIntent",
"lex:GetIntentVersions",
"lex:GetIntents",
"lex:GetMigration",
"lex:GetMigrations",
"lex:GetSlotType",
"lex:GetSlotTypeVersions",
"lex:GetSlotTypes",
"lex:GetUtterancesView",
"lex:PostContent",
"lex:PostText",
"lex:PutBot",
"lex:PutBotAlias",
"lex:PutIntent",
"lex:PutSlotType",
"lex:StartMigration",
]
for action in actions:
self.assertTrue(action in results) | [
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def checkNamespaces(self) :\n nsp = [x[\"namespace\"] for x in self.GO]\n lnsp = [len(x) for x in nsp]\n lnsp = set(lnsp)\n if (not lnsp == set([1])) :\n raise GOparserError(\"GO term namespace error (len!=1)\")\n nsp = [x[0] for x in nsp]\n nsp = set(nsp)\n if (not nsp == set([\"cellular_component\", \"biological_process\",\n \"molecular_function\"])) :\n raise GOparserError((\"Improper GO term namespaces: \" +\n repr(list(nsp))))",
"def InitActionCheck(initActionList, init):\n for actions in initActionList:\n action_class = getNameFromIRI(actions.is_a[0].iri)\n # if the action is a SpeedAction class\n if action_class == \"SpeedAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n target_speed = actions.has_target_speed[0]\n ontology_transition_dynamics = actions.has_transition_dynamics[0]\n xosc_transition_dynamics = checkTransitionDynamics(ontology_transition_dynamics)\n init.add_init_action(action_entity_ref, xosc.AbsoluteSpeedAction(target_speed, xosc_transition_dynamics))\n continue\n #if the action is TeleportAction\n if action_class == \"TeleportAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n # if the action has position as parameter set\n s: int = 0\n offset = 0\n lane_id = 0\n road_id = 0\n if len(actions.has_position) != 0:\n position = actions.has_position[0]\n if len(position.has_s) != 0:\n s = position.has_s[0]\n\n if len(position.has_offset) != 0:\n offset = position.has_offset[0]\n\n if len(position.has_lane_id) != 0:\n lane_id = position.has_lane_id[0]\n\n if len(position.has_road_id) != 0:\n road_id = position.has_road_id[0]\n\n init.add_init_action(action_entity_ref, xosc.TeleportAction(xosc.LanePosition(s, offset, lane_id, road_id)))\n continue\n if action_class == \"EnvironmentAction\": # if the action is an EnvironmentAction\n xosc_environment_action = checkEnvironmentAction(actions)\n init.add_global_action(xosc_environment_action)\n return init",
"def test_lexmatch(self):\n outfile = f\"{OUTPUT_DIR}/matcher-test-cli.sssom.tsv\"\n nucleus_match = \"XX:1\"\n intracellular_match = \"XX:2\"\n OTHER_ONTOLOGY = f\"{INPUT_DIR}/alignment-test.obo\"\n for input_arg in [TEST_SIMPLE_OBO, TEST_OBOJSON, TEST_OWL_RDF, TEST_ONT, TEST_DB]:\n for reversed in [False, True]:\n if reversed:\n args = [\n \"-a\",\n input_arg,\n \"-i\",\n OTHER_ONTOLOGY,\n ]\n else:\n args = [\n \"-i\",\n input_arg,\n \"-a\",\n OTHER_ONTOLOGY,\n ]\n result = self.runner.invoke(\n main,\n args\n + [\"lexmatch\", \"-R\", RULES_FILE, \"-o\", outfile, \"--no-ensure-strict-prefixes\"],\n )\n err = result.stderr\n self.assertEqual(0, result.exit_code)\n with open(outfile) as stream:\n contents = \"\\n\".join(stream.readlines())\n self.assertIn(\"skos:closeMatch\", contents)\n self.assertIn(\"skos:exactMatch\", contents)\n self.assertIn(nucleus_match, contents)\n self.assertIn(intracellular_match, contents)\n msdf = parse_sssom_table(outfile)\n msd = to_mapping_set_document(msdf)\n self.assertEqual(\"http://purl.obolibrary.org/obo/XX_\", msd.prefix_map[\"XX\"])\n cases = [\n (nucleus_match, NUCLEUS, SKOS_EXACT_MATCH),\n (intracellular_match, INTRACELLULAR, SKOS_CLOSE_MATCH),\n (\"BFO:0000023\", \"CHEBI:50906\", SKOS_EXACT_MATCH),\n ]\n for mapping in msd.mapping_set.mappings:\n tpl = (mapping.subject_id, mapping.object_id, mapping.predicate_id)\n tpl2 = (mapping.object_id, mapping.subject_id, mapping.predicate_id)\n if tpl in cases:\n cases.remove(tpl)\n elif tpl2 in cases:\n cases.remove(tpl2)\n self.assertEqual(0, len(cases), f\"Cases not found: {cases} for {input_arg}\")\n self.assertEqual(\"\", err)",
"def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))",
"def test_verbs():\n lexicon = scanner.Scanner()\n\n assert_equal(lexicon.scan(\"go\"), [('verb', 'go')])\n result = lexicon.scan(\"go kill eat\")",
"def __initSpellingActions(self):\n self.spellingActGrp = createActionGroup(self)\n \n self.spellCheckAct = E5Action(\n QCoreApplication.translate('ViewManager', 'Check spelling'),\n UI.PixmapCache.getIcon(\"spellchecking.png\"),\n QCoreApplication.translate(\n 'ViewManager', 'Check &spelling...'),\n QKeySequence(QCoreApplication.translate(\n 'ViewManager', \"Shift+F7\", \"Spelling|Spell Check\")),\n 0,\n self.spellingActGrp, 'vm_spelling_spellcheck')\n self.spellCheckAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', 'Perform spell check of current editor'))\n self.spellCheckAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Check spelling</b>\"\"\"\n \"\"\"<p>Perform a spell check of the current editor.</p>\"\"\"\n ))\n self.spellCheckAct.triggered.connect(self.__spellCheck)\n self.spellingActions.append(self.spellCheckAct)\n \n self.autoSpellCheckAct = E5Action(\n QCoreApplication.translate(\n 'ViewManager', 'Automatic spell checking'),\n UI.PixmapCache.getIcon(\"autospellchecking.png\"),\n QCoreApplication.translate(\n 'ViewManager', '&Automatic spell checking'),\n 0, 0,\n self.spellingActGrp, 'vm_spelling_autospellcheck', True)\n self.autoSpellCheckAct.setStatusTip(QCoreApplication.translate(\n 'ViewManager', '(De-)Activate automatic spell checking'))\n self.autoSpellCheckAct.setWhatsThis(QCoreApplication.translate(\n 'ViewManager',\n \"\"\"<b>Automatic spell checking</b>\"\"\"\n \"\"\"<p>Activate or deactivate the automatic spell checking\"\"\"\n \"\"\" function of all editors.</p>\"\"\"\n ))\n self.autoSpellCheckAct.setChecked(\n Preferences.getEditor(\"AutoSpellCheckingEnabled\"))\n self.autoSpellCheckAct.triggered.connect(\n self.__setAutoSpellChecking)\n self.spellingActions.append(self.autoSpellCheckAct)\n \n self.__enableSpellingActions()",
"def test_shell_add_verbs_already_exists(self):\n with pytest.raises(AlreadyExists):\n self.shell.add_verbs(self.ATestCommand, \"nope\", \"test\")\n # All of the verbs should have been validated first, so \"nope\"\n # shouldn't have been added either.\n assert \"nope\" not in self.shell._verbs",
"def validateActions(name: str, val: dict, propTypeObj: rst.rfSchema.PropType, payloadType: str):\n actionMessages, actionCounts = OrderedDict(), Counter()\n\n parentTypeObj = rst.rfSchema.PropType(payloadType, propTypeObj.schemaObj)\n actionsDict = {act.name: (val.get(act.name, 'n/a'), act.actTag) for act in parentTypeObj.getActions()}\n\n if 'Oem' in val:\n if rst.currentService.config.get('oemcheck'):\n for newAct in val['Oem']:\n actionsDict['Oem.' + newAct] = (val['Oem'][newAct], None)\n else:\n actionCounts['oemActionSkip'] += 1\n\n # For each action found, check action dictionary for existence and conformance\n # No action is required unless specified, target is not required unless specified\n # (should check for viable parameters)\n for k in actionsDict:\n actionDecoded, actDict = actionsDict[k]\n actPass = True\n actOptional = False\n if actionDecoded != 'n/a':\n # validate target\n target = actionDecoded.get('target')\n if target is None:\n actPass = False\n rsvLogger.error('{}: target for action is missing'.format(name + '.' + k))\n elif not isinstance(target, str):\n actPass = False\n rsvLogger.error('{}: target for action is malformed; expected string, got {}'\n .format(name + '.' + k, str(type(target)).strip('<>')))\n # check for unexpected properties\n for prop in actionDecoded:\n if prop not in ['target', 'title', '@Redfish.ActionInfo',\n '@Redfish.OperationApplyTimeSupport'] and '@Redfish.AllowableValues' not in prop:\n actPass = False\n rsvLogger.error('{}: Property \"{}\" is not allowed in actions property. Allowed properties are \"{}\", \"{}\", \"{}\", \"{}\" and \"{}\"'\n .format(name + '.' + k, prop, 'target', 'title', '@Redfish.ActionInfo', '@Redfish.OperationApplyTimeSupport', '*@Redfish.AllowableValues'))\n else:\n # <Annotation Term=\"Redfish.Required\"/>\n if actDict is not None and actDict.find('annotation', {'term': 'Redfish.Required'}):\n actPass = False\n rsvLogger.error('{}: action not found, is mandatory'.format(name + '.' + k))\n else:\n actOptional = True\n rsvLogger.debug('{}: action not found, is not mandatory'.format(name + '.' + k))\n actionMessages[name + '.' + k] = (\n 'Action', '-',\n 'Yes' if actionDecoded != 'n/a' else 'No',\n 'Optional' if actOptional else 'PASS' if actPass else 'FAIL')\n if actOptional:\n actionCounts['optionalAction'] += 1\n elif actPass:\n actionCounts['passAction'] += 1\n else:\n actionCounts['failAction'] += 1\n return actionMessages, actionCounts",
"def test_cli_actions_exist(self):\n actions = [\"create\", \"update\"]\n\n for action in actions:\n result = self._runner.invoke(cli, [action, \"--test\"])\n\n self.assertEqual(result.exit_code, 0)\n self.assertTrue(action in result.output)",
"def check_action_intersect(action_1, action_2):\n if action_1 == action_2:\n return True\n raise NotImplementedError(\"Not yet implemented, replaced pdb.set_trace\")",
"def test_check_push_rules_actions(self) -> None:\n with self.assertRaises(InvalidRuleException):\n self.module_api.check_push_rule_actions([\"foo\"])\n\n with self.assertRaises(InvalidRuleException):\n self.module_api.check_push_rule_actions([{\"foo\": \"bar\"}])\n\n self.module_api.check_push_rule_actions([\"notify\"])\n\n self.module_api.check_push_rule_actions(\n [{\"set_tweak\": \"sound\", \"value\": \"default\"}]\n )",
"def test_lti20_rest_good_dispatch(self):\r\n for ginput, expected in self.GOOD_DISPATCH_INPUTS:\r\n self.assertEquals(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)",
"def test_parses_ambiguous_grammars(self):\n lexed_positive = [\n Token(\n value=\"Hegh\",\n token_type=AKT.VERB,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=AKT.BE,\n line_number=0,\n ),\n ]\n self.assertTrue(parse(AmbiguousKlingonGrammar, lexed_positive))\n\n lexed_negative = [\n Token(\n value=\"Hegh\",\n token_type=AKT.VERB,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=AKT.BE,\n line_number=0,\n ),\n Token(\n value=\"be'\",\n token_type=AKT.BE,\n line_number=0,\n ),\n ]\n self.assertTrue(parse(AmbiguousKlingonGrammar, lexed_negative))",
"def test_introduce_actions(self):\n pass",
"def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"",
"def action_recognized(action, response):\n unrecognized_word = get_unrecognized(action, response)\n if unrecognized_word:\n if unrecognized_word not in gv.kg._unrecognized_words:\n gv.dbg(\"[UTIL] Added unrecognized word \\\"{}\\\"\".format(unrecognized_word))\n gv.kg._unrecognized_words.append(unrecognized_word)\n return False\n return True",
"def test_create_resource_access_review_for_all_namespaces(self):\n pass",
"def test_ListAction(self) -> None:\n\n a1 = SCons.Action.Action([\"x\", \"y\", \"z\", [\"a\", \"b\", \"c\"]])\n assert isinstance(a1, SCons.Action.ListAction), a1\n assert a1.varlist == (), a1.varlist\n assert isinstance(a1.list[0], SCons.Action.CommandAction), a1.list[0]\n assert a1.list[0].cmd_list == \"x\", a1.list[0].cmd_list\n assert isinstance(a1.list[1], SCons.Action.CommandAction), a1.list[1]\n assert a1.list[1].cmd_list == \"y\", a1.list[1].cmd_list\n assert isinstance(a1.list[2], SCons.Action.CommandAction), a1.list[2]\n assert a1.list[2].cmd_list == \"z\", a1.list[2].cmd_list\n assert isinstance(a1.list[3], SCons.Action.CommandAction), a1.list[3]\n assert a1.list[3].cmd_list == [\"a\", \"b\", \"c\"], a1.list[3].cmd_list\n\n a2 = SCons.Action.Action(\"x\\ny\\nz\")\n assert isinstance(a2, SCons.Action.ListAction), a2\n assert a2.varlist == (), a2.varlist\n assert isinstance(a2.list[0], SCons.Action.CommandAction), a2.list[0]\n assert a2.list[0].cmd_list == \"x\", a2.list[0].cmd_list\n assert isinstance(a2.list[1], SCons.Action.CommandAction), a2.list[1]\n assert a2.list[1].cmd_list == \"y\", a2.list[1].cmd_list\n assert isinstance(a2.list[2], SCons.Action.CommandAction), a2.list[2]\n assert a2.list[2].cmd_list == \"z\", a2.list[2].cmd_list\n\n def foo() -> None:\n pass\n\n a3 = SCons.Action.Action([\"x\", foo, \"z\"])\n assert isinstance(a3, SCons.Action.ListAction), a3\n assert a3.varlist == (), a3.varlist\n assert isinstance(a3.list[0], SCons.Action.CommandAction), a3.list[0]\n assert a3.list[0].cmd_list == \"x\", a3.list[0].cmd_list\n assert isinstance(a3.list[1], SCons.Action.FunctionAction), a3.list[1]\n assert a3.list[1].execfunction == foo, a3.list[1].execfunction\n assert isinstance(a3.list[2], SCons.Action.CommandAction), a3.list[2]\n assert a3.list[2].cmd_list == \"z\", a3.list[2].cmd_list\n\n a4 = SCons.Action.Action([\"x\", \"y\"], strfunction=foo)\n assert isinstance(a4, SCons.Action.ListAction), a4\n assert a4.varlist == (), a4.varlist\n assert isinstance(a4.list[0], SCons.Action.CommandAction), a4.list[0]\n assert a4.list[0].cmd_list == \"x\", a4.list[0].cmd_list\n assert a4.list[0].strfunction == foo, a4.list[0].strfunction\n assert isinstance(a4.list[1], SCons.Action.CommandAction), a4.list[1]\n assert a4.list[1].cmd_list == \"y\", a4.list[1].cmd_list\n assert a4.list[1].strfunction == foo, a4.list[1].strfunction\n\n a5 = SCons.Action.Action(\"x\\ny\", strfunction=foo)\n assert isinstance(a5, SCons.Action.ListAction), a5\n assert a5.varlist == (), a5.varlist\n assert isinstance(a5.list[0], SCons.Action.CommandAction), a5.list[0]\n assert a5.list[0].cmd_list == \"x\", a5.list[0].cmd_list\n assert a5.list[0].strfunction == foo, a5.list[0].strfunction\n assert isinstance(a5.list[1], SCons.Action.CommandAction), a5.list[1]\n assert a5.list[1].cmd_list == \"y\", a5.list[1].cmd_list\n assert a5.list[1].strfunction == foo, a5.list[1].strfunction"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that Kinesis Analytics V1 actions are present in the kinesisanalytics namespace | def test_services_with_multiple_pages_kinesis_analytics(self):
# Kinesis Analytics V1
results = get_actions_for_service("kinesisanalytics")
actions = [
"kinesisanalytics:GetApplicationState", # Only in v1, not v2
"kinesisanalytics:ListApplications", # In both
]
for action in actions:
self.assertTrue(action in results) | [
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def test_services_with_multiple_pages_ses(self):\n # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html\n self.assertTrue(\"ses:PutIdentityPolicy\" in self.all_actions)\n # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html\n self.assertTrue(\"ses:ListImportJobs\" in self.all_actions)\n\n results = get_actions_for_service(\"ses\")\n actions = [\n \"ses:CloneReceiptRuleSet\",\n \"ses:CreateConfigurationSetTrackingOptions\",\n \"ses:CreateReceiptFilter\",\n \"ses:CreateReceiptRule\",\n \"ses:CreateReceiptRuleSet\",\n \"ses:CreateTemplate\",\n \"ses:DeleteConfigurationSetTrackingOptions\",\n \"ses:DeleteIdentity\",\n \"ses:DeleteIdentityPolicy\",\n \"ses:DeleteReceiptFilter\",\n \"ses:DeleteReceiptRule\",\n \"ses:DeleteReceiptRuleSet\",\n \"ses:DeleteTemplate\",\n \"ses:DeleteVerifiedEmailAddress\",\n \"ses:DescribeActiveReceiptRuleSet\",\n \"ses:DescribeConfigurationSet\",\n \"ses:DescribeReceiptRule\",\n \"ses:DescribeReceiptRuleSet\",\n \"ses:GetAccountSendingEnabled\",\n \"ses:GetIdentityDkimAttributes\",\n \"ses:GetIdentityMailFromDomainAttributes\",\n \"ses:GetIdentityNotificationAttributes\",\n \"ses:GetIdentityPolicies\",\n \"ses:GetIdentityVerificationAttributes\",\n \"ses:GetSendQuota\",\n \"ses:GetSendStatistics\",\n \"ses:GetTemplate\",\n \"ses:ListIdentities\",\n \"ses:ListIdentityPolicies\",\n \"ses:ListReceiptFilters\",\n \"ses:ListReceiptRuleSets\",\n \"ses:ListTemplates\",\n \"ses:ListVerifiedEmailAddresses\",\n \"ses:PutIdentityPolicy\",\n \"ses:ReorderReceiptRuleSet\",\n \"ses:SendBounce\",\n \"ses:SendBulkTemplatedEmail\",\n \"ses:SendRawEmail\",\n \"ses:SendTemplatedEmail\",\n \"ses:SetActiveReceiptRuleSet\",\n \"ses:SetIdentityDkimEnabled\",\n \"ses:SetIdentityFeedbackForwardingEnabled\",\n \"ses:SetIdentityHeadersInNotificationsEnabled\",\n \"ses:SetIdentityMailFromDomain\",\n \"ses:SetIdentityNotificationTopic\",\n \"ses:SetReceiptRulePosition\",\n \"ses:TestRenderTemplate\",\n \"ses:UpdateAccountSendingEnabled\",\n \"ses:UpdateConfigurationSetReputationMetricsEnabled\",\n \"ses:UpdateConfigurationSetSendingEnabled\",\n \"ses:UpdateConfigurationSetTrackingOptions\",\n \"ses:UpdateReceiptRule\",\n \"ses:UpdateTemplate\",\n \"ses:VerifyDomainDkim\",\n \"ses:VerifyDomainIdentity\",\n \"ses:VerifyEmailAddress\",\n \"ses:VerifyEmailIdentity\",\n ]\n for action in actions:\n self.assertTrue(action in results)",
"def check_if_event_is_action(msg):\n\treturn dict(msg).__contains__('entityID')",
"def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)",
"def does_action_exist(self, action):\n exists = 0\n for i in range(len(self.arns)):\n if action in self.arns[i]['actions']:\n exists = exists + 1\n else:\n continue\n return exists > 0",
"def test_subset_cf_keys(self):\n del self.event['username'], self.event['policy_id']\n self._check_santized_event(False)",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass",
"def check_action_intersect(action_1, action_2):\n if action_1 == action_2:\n return True\n raise NotImplementedError(\"Not yet implemented, replaced pdb.set_trace\")",
"def test_create_namespaced_ingress(self):\n pass",
"def aws_es_os_coginto_authentication_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for response in describe_es_os_domains(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(response,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n esDomainName = response[\"DomainStatus\"][\"DomainName\"]\n esVersion = response[\"DomainStatus\"][\"ElasticsearchVersion\"]\n domainId = response[\"DomainStatus\"][\"DomainId\"]\n domainArn = response[\"DomainStatus\"][\"ARN\"]\n try:\n cognitoEnabledCheck = response[\"DomainStatus\"][\"CognitoOptions\"][\"Enabled\"]\n except:\n cognitoEnabledCheck = False\n # this is a failing check\n if cognitoEnabledCheck is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-cognito-auth-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.2] OpenSearch/AWS ElasticSearch Service domains should use Cognito authentication for Kibana\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" does not use Cognito authentication for Kibana. Refer to the remediation instructions if this configuration is not intended\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your domain should use Cognito authentication for Kibana refer to the Amazon Cognito Authentication for Kibana section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion,\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 IA-1\",\n \"NIST SP 800-53 Rev. 4 IA-2\",\n \"NIST SP 800-53 Rev. 4 IA-4\",\n \"NIST SP 800-53 Rev. 4 IA-5\",\n \"NIST SP 800-53 Rev. 4 IA-8\",\n \"NIST SP 800-53 Rev. 4 PE-2\",\n \"NIST SP 800-53 Rev. 
4 PS-3\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.9.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-cognito-auth-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.2] OpenSearch/AWS ElasticSearch Service domains should use Cognito authentication for Kibana\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" uses Cognito authentication for Kibana.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your domain should use Cognito authentication for Kibana refer to the Amazon Cognito Authentication for Kibana section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion,\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 IA-1\",\n \"NIST SP 800-53 Rev. 4 IA-2\",\n \"NIST SP 800-53 Rev. 4 IA-4\",\n \"NIST SP 800-53 Rev. 4 IA-5\",\n \"NIST SP 800-53 Rev. 4 IA-8\",\n \"NIST SP 800-53 Rev. 4 PE-2\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.9.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding",
"def test_delete_namespaced_ingress(self):\n pass",
"def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies for '{account_id} ({account_name})'\")",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass",
"def exists_intent_action(self, intent_keyword):\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_snapshot_variables_action_spaces(self):\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass",
"def allow_action(self, action):\n aset = set([k for k in action.attrs.keys() if k.startswith(\"variant.\")])\n\n unknown_variants = aset - self.__keyset\n\n # handle variant.debug\n\n for u in unknown_variants:\n # install only unknown variant.debug\n # actions tagged w/ \"false\"\n if u.startswith(\"variant.debug.\") and \\\n action.attrs[u] != \"false\":\n return False\n # could assert here for other\n # unknown variants... best course TBD\n for a in aset & self.__keyset:\n if self[a] != action.attrs[a]:\n return False\n return True",
"def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)",
"def _is_s3_notif(event):\n return (\n event.get(\"Records\")\n and isinstance(event.get(\"Records\"), list)\n and \"s3\" in event.get(\"Records\")[0]\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that ses v1 and ses v2 actions are both present in the ses namespace | def test_services_with_multiple_pages_ses(self):
# SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html
self.assertTrue("ses:PutIdentityPolicy" in self.all_actions)
# SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html
self.assertTrue("ses:ListImportJobs" in self.all_actions)
results = get_actions_for_service("ses")
actions = [
"ses:CloneReceiptRuleSet",
"ses:CreateConfigurationSetTrackingOptions",
"ses:CreateReceiptFilter",
"ses:CreateReceiptRule",
"ses:CreateReceiptRuleSet",
"ses:CreateTemplate",
"ses:DeleteConfigurationSetTrackingOptions",
"ses:DeleteIdentity",
"ses:DeleteIdentityPolicy",
"ses:DeleteReceiptFilter",
"ses:DeleteReceiptRule",
"ses:DeleteReceiptRuleSet",
"ses:DeleteTemplate",
"ses:DeleteVerifiedEmailAddress",
"ses:DescribeActiveReceiptRuleSet",
"ses:DescribeConfigurationSet",
"ses:DescribeReceiptRule",
"ses:DescribeReceiptRuleSet",
"ses:GetAccountSendingEnabled",
"ses:GetIdentityDkimAttributes",
"ses:GetIdentityMailFromDomainAttributes",
"ses:GetIdentityNotificationAttributes",
"ses:GetIdentityPolicies",
"ses:GetIdentityVerificationAttributes",
"ses:GetSendQuota",
"ses:GetSendStatistics",
"ses:GetTemplate",
"ses:ListIdentities",
"ses:ListIdentityPolicies",
"ses:ListReceiptFilters",
"ses:ListReceiptRuleSets",
"ses:ListTemplates",
"ses:ListVerifiedEmailAddresses",
"ses:PutIdentityPolicy",
"ses:ReorderReceiptRuleSet",
"ses:SendBounce",
"ses:SendBulkTemplatedEmail",
"ses:SendRawEmail",
"ses:SendTemplatedEmail",
"ses:SetActiveReceiptRuleSet",
"ses:SetIdentityDkimEnabled",
"ses:SetIdentityFeedbackForwardingEnabled",
"ses:SetIdentityHeadersInNotificationsEnabled",
"ses:SetIdentityMailFromDomain",
"ses:SetIdentityNotificationTopic",
"ses:SetReceiptRulePosition",
"ses:TestRenderTemplate",
"ses:UpdateAccountSendingEnabled",
"ses:UpdateConfigurationSetReputationMetricsEnabled",
"ses:UpdateConfigurationSetSendingEnabled",
"ses:UpdateConfigurationSetTrackingOptions",
"ses:UpdateReceiptRule",
"ses:UpdateTemplate",
"ses:VerifyDomainDkim",
"ses:VerifyDomainIdentity",
"ses:VerifyEmailAddress",
"ses:VerifyEmailIdentity",
]
for action in actions:
self.assertTrue(action in results) | [
"def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\n self.assertTrue(lb_v1_only_action in results)\n self.assertTrue(lb_v2_only_action in results)",
"def test_subscriber_access_for_two_vsg_services(self):",
"def test_subscriber_access_if_vsg2_goes_down(self):",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def test_subscriber_access_if_vsg1_goes_down(self):",
"def test_create_namespaced_ingress(self):\n pass",
"def test_patch_namespaced_ingress(self):\n pass",
"def test_replace_namespaced_ingress(self):\n pass",
"def check_namespace(client, namespace):\n\n try:\n v1_namespace = client.resources.get(api_version='v1', kind='Namespace')\n v1_namespace.get(name=namespace)\n except exceptions.NotFoundError:\n return False\n except Exception as e:\n print(\"Error checking namespace {}: {}\\n\".format(namespace, e))\n sys.exit(1)\n\n return True",
"def test_delete_namespaced_ingress(self):\n pass",
"def checkNamespaces(self) :\n nsp = [x[\"namespace\"] for x in self.GO]\n lnsp = [len(x) for x in nsp]\n lnsp = set(lnsp)\n if (not lnsp == set([1])) :\n raise GOparserError(\"GO term namespace error (len!=1)\")\n nsp = [x[0] for x in nsp]\n nsp = set(nsp)\n if (not nsp == set([\"cellular_component\", \"biological_process\",\n \"molecular_function\"])) :\n raise GOparserError((\"Improper GO term namespaces: \" +\n repr(list(nsp))))",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_release_lifecycle_progression_action_spaces(self):\n pass",
"def test_services_with_multiple_pages_greengrass(self):\n # Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html\n self.assertTrue(\"greengrass:CreateResourceDefinition\" in self.all_actions)\n # Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html\n self.assertTrue(\"greengrass:CreateComponentVersion\" in self.all_actions)\n results = get_actions_for_service(\"greengrass\")\n actions = [\n \"greengrass:AssociateRoleToGroup\",\n \"greengrass:CreateConnectorDefinition\",\n \"greengrass:CreateConnectorDefinitionVersion\",\n \"greengrass:CreateCoreDefinition\",\n \"greengrass:CreateCoreDefinitionVersion\",\n \"greengrass:CreateDeviceDefinition\",\n \"greengrass:CreateDeviceDefinitionVersion\",\n \"greengrass:CreateFunctionDefinition\",\n \"greengrass:CreateFunctionDefinitionVersion\",\n \"greengrass:CreateGroup\",\n \"greengrass:CreateGroupCertificateAuthority\",\n \"greengrass:CreateGroupVersion\",\n \"greengrass:CreateLoggerDefinition\",\n \"greengrass:CreateLoggerDefinitionVersion\",\n \"greengrass:CreateResourceDefinition\",\n \"greengrass:CreateResourceDefinitionVersion\",\n \"greengrass:CreateSoftwareUpdateJob\",\n \"greengrass:CreateSubscriptionDefinition\",\n \"greengrass:CreateSubscriptionDefinitionVersion\",\n \"greengrass:DeleteConnectorDefinition\",\n \"greengrass:DeleteCoreDefinition\",\n \"greengrass:DeleteDeviceDefinition\",\n \"greengrass:DeleteFunctionDefinition\",\n \"greengrass:DeleteGroup\",\n \"greengrass:DeleteLoggerDefinition\",\n \"greengrass:DeleteResourceDefinition\",\n \"greengrass:DeleteSubscriptionDefinition\",\n \"greengrass:DisassociateRoleFromGroup\",\n \"greengrass:Discover\",\n \"greengrass:GetAssociatedRole\",\n \"greengrass:GetBulkDeploymentStatus\",\n \"greengrass:GetConnectorDefinition\",\n \"greengrass:GetConnectorDefinitionVersion\",\n \"greengrass:GetCoreDefinition\",\n \"greengrass:GetCoreDefinitionVersion\",\n \"greengrass:GetDeploymentStatus\",\n \"greengrass:GetDeviceDefinition\",\n \"greengrass:GetDeviceDefinitionVersion\",\n \"greengrass:GetFunctionDefinition\",\n \"greengrass:GetFunctionDefinitionVersion\",\n \"greengrass:GetGroup\",\n \"greengrass:GetGroupCertificateAuthority\",\n \"greengrass:GetGroupCertificateConfiguration\",\n \"greengrass:GetGroupVersion\",\n \"greengrass:GetLoggerDefinition\",\n \"greengrass:GetLoggerDefinitionVersion\",\n \"greengrass:GetResourceDefinition\",\n \"greengrass:GetResourceDefinitionVersion\",\n \"greengrass:GetSubscriptionDefinition\",\n \"greengrass:GetSubscriptionDefinitionVersion\",\n \"greengrass:GetThingRuntimeConfiguration\",\n \"greengrass:ListBulkDeploymentDetailedReports\",\n \"greengrass:ListBulkDeployments\",\n \"greengrass:ListConnectorDefinitionVersions\",\n \"greengrass:ListConnectorDefinitions\",\n \"greengrass:ListCoreDefinitionVersions\",\n \"greengrass:ListCoreDefinitions\",\n \"greengrass:ListDeviceDefinitionVersions\",\n \"greengrass:ListDeviceDefinitions\",\n \"greengrass:ListFunctionDefinitionVersions\",\n \"greengrass:ListFunctionDefinitions\",\n \"greengrass:ListGroupCertificateAuthorities\",\n \"greengrass:ListGroupVersions\",\n \"greengrass:ListGroups\",\n \"greengrass:ListLoggerDefinitionVersions\",\n \"greengrass:ListLoggerDefinitions\",\n \"greengrass:ListResourceDefinitionVersions\",\n \"greengrass:ListResourceDefinitions\",\n \"greengrass:ListSubscriptionDefinitionVersions\",\n \"greengrass:ListSubscriptionDefinitions\",\n \"greengrass:ResetDeployments\",\n 
\"greengrass:StartBulkDeployment\",\n \"greengrass:StopBulkDeployment\",\n \"greengrass:UpdateConnectorDefinition\",\n \"greengrass:UpdateCoreDefinition\",\n \"greengrass:UpdateDeviceDefinition\",\n \"greengrass:UpdateFunctionDefinition\",\n \"greengrass:UpdateGroup\",\n \"greengrass:UpdateGroupCertificateConfiguration\",\n \"greengrass:UpdateLoggerDefinition\",\n \"greengrass:UpdateResourceDefinition\",\n \"greengrass:UpdateSubscriptionDefinition\",\n \"greengrass:UpdateThingRuntimeConfiguration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # if action not in results:\n # print(action)",
"def check_script(vouts):\n for vout in [v for v in vouts[::-1] if v['hex'].startswith('6a')]:\n verb = BlockchainSpider.decode_op_return(vout['hex'])\n action = Spoolverb.from_verb(verb).action\n if action in Spoolverb.supported_actions:\n return verb\n raise Exception(\"Invalid ascribe transaction\")",
"def test_read_namespaced_ingress(self):\n pass",
"def test_fn_saintsxctf_com_api_authorizer_exists(self) -> None:\n api_id = APIGateway.rest_api_exists(self, self.api_name)\n authorizers = self.apigateway.get_authorizers(restApiId=api_id)\n authorizer_list: List[dict] = authorizers.get('items')\n self.assertEqual(1, len(authorizer_list))\n\n authorizer: dict = authorizer_list[0]\n self.assertEqual('saints-xctf-com-fn-auth', authorizer.get('name'))\n self.assertEqual('TOKEN', authorizer.get('type'))\n\n if self.prod_env:\n authorizer_name = 'function:SaintsXCTFAuthorizerPROD/invocations'\n else:\n authorizer_name = 'function:SaintsXCTFAuthorizerDEV/invocations'\n\n self.assertTrue(authorizer_name in authorizer.get('authorizerUri'))",
"def test_aws_service_api_validate_subscription_post(self):\n pass",
"def check(self):\n action_tag_value = self.message_tree.find(\"./soap:Header/wsa:Action\", self.namespaces).text\n\n service_tag_value = \\\n self.message_tree.find(self.distribution_envelope + '/itk:header', self.namespaces).attrib['service']\n\n if action_tag_value != service_tag_value:\n logging.warning(\"Action type does not match service type: (Action Tag, Service Tag) (%s, %s)\",\n action_tag_value,\n service_tag_value)\n return True, \"Manifest action does not match service action\"\n\n return False, None",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that kafka actions are not overwritten in the IAM definition | def test_kafka_action_names_overlap_issue(self):
# Kafka actions used to be in two pages but are now one. This verifies the current state.
# results = get_actions_for_service("kafka")
# print(results)
actions = [
"kafka:BatchAssociateScramSecret",
"kafka:BatchDisassociateScramSecret",
"kafka:CreateClusterV2",
"kafka:DeleteConfiguration",
"kafka:DescribeClusterV2",
"kafka:ListClustersV2",
"kafka:ListConfigurationRevisions",
"kafka:ListKafkaVersions",
"kafka:ListScramSecrets",
"kafka:RebootBroker",
"kafka:UpdateBrokerType",
"kafka:UpdateConfiguration",
"kafka:UpdateConnectivity",
"kafka:UpdateSecurity"
]
for action in actions:
self.assertTrue(action in self.all_actions) | [
"def test_excluded_actions_scan_policy_file(self):\n test_policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"iam:CreateAccessKey\"\n ],\n \"Resource\": \"*\"\n },\n ]\n }\n results = scan_policy(test_policy)\n expected_results = {\n \"ServicesAffected\": [\n \"iam\",\n \"s3\"\n ],\n \"PrivilegeEscalation\": {\n \"severity\": \"high\",\n \"description\": \"<p>These policies allow a combination of IAM actions that allow a principal with these permissions to escalate their privileges - for example, by creating an access key for another IAM user, or modifying their own permissions. This research was pioneered by Spencer Gietzen at Rhino Security Labs. Remediation guidance can be found <a href=\\\"https://rhinosecuritylabs.com/aws/aws-privilege-escalation-methods-mitigation/\\\">here</a>.</p>\",\n \"findings\": [\n {\n \"type\": \"CreateAccessKey\",\n \"actions\": [\n \"iam:createaccesskey\"\n ]\n }\n ]},\n \"ResourceExposure\": {\n \"severity\": \"high\",\n \"description\": \"<p>Resource Exposure actions allow modification of Permissions to <a href=\\\"https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html\\\">resource-based policies</a> or otherwise can expose AWS resources to the public via similar actions that can lead to resource exposure - for example, the ability to modify <a href=\\\"https://docs.aws.amazon.com/ram/latest/userguide/what-is.html\\\">AWS Resource Access Manager</a>.</p>\",\n \"findings\": [\n \"iam:CreateAccessKey\"\n ]},\n \"DataExfiltration\": {\n \"severity\": \"medium\",\n \"description\": \"<div style=\\\"text-align:left\\\"><p>Policies with Data Exfiltration potential allow certain read-only IAM actions without resource constraints, such as <code>s3:GetObject</code>, <code>ssm:GetParameter*</code>, or <code>secretsmanager:GetSecretValue</code>. <br> <ul> <li>Unrestricted <code>s3:GetObject</code> permissions has a long history of customer data leaks.</li> <li><code>ssm:GetParameter*</code> and <code>secretsmanager:GetSecretValue</code> are both used to access secrets.</li> <li><code>rds:CopyDBSnapshot</code> and <code>rds:CreateDBSnapshot</code> can be used to exfiltrate RDS database contents.</li> </ul></p></div>\",\n \"findings\":[\n \"s3:GetObject\"\n ]},\n \"ServiceWildcard\": {\n \"severity\": \"medium\",\n \"description\": \"<p>\\\"Service Wildcard\\\" is the unofficial way of referring to IAM policy statements that grant access to ALL actions under a service - like s3:*. Prioritizing the remediation of policies with this characteristic can help to efficiently reduce the total count of issues in the Cloudsplaining report.</p>\",\n \"findings\": []\n },\n \"CredentialsExposure\": {\n \"severity\": \"high\",\n \"description\": \"<p>Credentials Exposure actions return credentials as part of the API response , such as ecr:GetAuthorizationToken, iam:UpdateAccessKey, and others. The full list is maintained here: https://gist.github.com/kmcquade/33860a617e651104d243c324ddf7992a</p>\",\n \"findings\": [\n \"iam:CreateAccessKey\"\n ]},\n \"InfrastructureModification\": {\n \"severity\": \"low\",\n \"description\": \"\",\n \"findings\":[\n \"iam:CreateAccessKey\",\n \"s3:GetObject\"\n ]}\n }\n # print(json.dumps(results, indent=4))\n self.maxDiff = None\n self.assertDictEqual(results, expected_results)",
"def test_introduce_actions(self):\n pass",
"def test_update_workflow_permission(self):\n pass",
"def set_actions(self, actions):",
"def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write\"\n \"s3\", \"*\", \"Permissions management\"\n )\n print(output)\n for item in desired_output:\n self.assertTrue(item in output)\n # self.assertListEqual(desired_output, output)",
"def update_actions(self):\n pass",
"async def before_action(self, action, *args, **kwargs):\n return True",
"def before_action(self, action):\n return True",
"async def before_action(self, action: str, *args, **kwargs) -> bool:\n return True",
"def test_check_push_rules_actions(self) -> None:\n with self.assertRaises(InvalidRuleException):\n self.module_api.check_push_rule_actions([\"foo\"])\n\n with self.assertRaises(InvalidRuleException):\n self.module_api.check_push_rule_actions([{\"foo\": \"bar\"}])\n\n self.module_api.check_push_rule_actions([\"notify\"])\n\n self.module_api.check_push_rule_actions(\n [{\"set_tweak\": \"sound\", \"value\": \"default\"}]\n )",
"def test_set_alert_acl(self):\n pass",
"def test_update_workflow_step_permission(self):\n pass",
"def test_remove_alert_access(self):\n pass",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_delete_action_spaces(self):\n pass",
"def _validate_setup(self):\n if self.default_action is not None:\n action_meta: ActionMetaData\n for action_meta in self.config.second_pass_actions:\n if len(action_meta.positional) > 0:\n raise CommandLineConfigError(\n 'No positional arguments allowed when default ' +\n f\"action '{self.default_action}' \" +\n f'given for method {action_meta.name}')",
"def test_custom_action_response_descriptor_octopus_server_web_api_actions_machine_policy_template_action_spaces(self):\n pass",
"def test_get_actions_with_arn_type_and_access_level_case_4(self):\n desired_output = [\n 'secretsmanager:ListSecrets'\n ]\n output = get_actions_with_arn_type_and_access_level(\n \"secretsmanager\", \"*\", \"List\"\n )\n self.assertListEqual(desired_output, output)",
"def add_permission(self):\n statement_id = '{}_api_{}'.format(self.app_name, self.trigger_settings['api_name'])\n principal = 'apigateway.amazonaws.com'\n lambda_alias_arn = get_lambda_alias_arn(self.app_name, self.env, self.region)\n lambda_unqualified_arn = get_lambda_arn(self.app_name, self.env, self.region)\n resource_name = self.trigger_settings.get('resource', '')\n resource_name = resource_name.replace('/', '')\n method_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/{}/{}/{}'.format(\n self.region, self.account_id, self.api_id, self.env, self.trigger_settings['method'], resource_name)\n global_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/*/*/{}'.format(self.region, self.account_id, self.api_id,\n resource_name)\n add_lambda_permissions(\n function=lambda_alias_arn,\n statement_id=statement_id + self.trigger_settings['method'],\n action='lambda:InvokeFunction',\n principal=principal,\n env=self.env,\n region=self.region,\n source_arn=method_api_source_arn)\n add_lambda_permissions(\n function=lambda_alias_arn,\n statement_id=statement_id,\n action='lambda:InvokeFunction',\n principal=principal,\n env=self.env,\n region=self.region,\n source_arn=global_api_source_arn)\n add_lambda_permissions(\n function=lambda_unqualified_arn,\n statement_id=statement_id + self.trigger_settings['method'],\n action='lambda:InvokeFunction',\n principal=principal,\n env=self.env,\n region=self.region,\n source_arn=method_api_source_arn)\n add_lambda_permissions(\n function=lambda_unqualified_arn,\n statement_id=statement_id,\n action='lambda:InvokeFunction',\n principal=principal,\n env=self.env,\n region=self.region,\n source_arn=global_api_source_arn)",
"def _is_legal_action(self, action: Action):\n assert 0 <= action < self.nA, f\"{action} is not a legal action.\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
1. Maintain a decreasing stack of indices by scanning nums from left to right. 2. Then scan nums from right to left and compute the maximum ramp width. | def maxWidthRamp(self, nums: list[int]) -> int:
maxWidth = 0
descStack = []
# Generate decreasing stack.
for i, num in enumerate(nums):
if not descStack or nums[descStack[-1]] > num:
descStack.append(i)
# Check elements from right to left.
for j in reversed(range(len(nums))):
while descStack and nums[descStack[-1]] <= nums[j]:
maxWidth = max(maxWidth, j - descStack.pop())
return maxWidth | [
"def peg_width_per_levels(base_width):\n limiter = 2\n decrementer = -2\n decrementing_width = int(base_width)\n peg_count_per_level = []\n while decrementing_width >= limiter:\n peg_count_per_level.append(int(decrementing_width))\n decrementing_width += decrementer\n return peg_count_per_level",
"def calculate_medians(numbers):\n max_heap = MaxHeap() # For storing the smaller half of numbers\n min_heap = MinHeap() # For storing the larger half of numbers\n medians = []\n for number in numbers:\n if max_heap.peek_max() is None or max_heap.peek_max() > number:\n max_heap.insert(number)\n if max_heap.size > min_heap.size + 1:\n min_heap.insert(max_heap.extract_max())\n else:\n min_heap.insert(number)\n if min_heap.size > max_heap.size + 1:\n max_heap.insert(min_heap.extract_min())\n if max_heap.size >= min_heap.size:\n medians.append(max_heap.peek_max())\n else:\n medians.append(min_heap.peek_min())\n return sum(medians) % 10000",
"def width(x):\n return (upper_bound(x) - lower_bound(x)) / 2",
"def max_window(arr):\n length = len(arr)\n # Distance to the smallest element to the right\n arr_l = next_smaller_dist(idx_next_smaller(arr))\n # Distance to the smallest element to the left\n arr_r = list(reversed(next_smaller_dist(idx_next_smaller(list(reversed(arr))))))\n max_windows = [0 for _ in range(length)]\n for i in range(length):\n if arr_l[i] == -1 and arr_r[i] == -1:\n max_windows[i] = len(arr_l)\n elif arr_l[i] == -1 and arr_r[i] != -1:\n max_windows[i] = (length - i) + (arr_r[i] - 1)\n elif arr_l[i] != -1 and arr_r[i] == -1:\n max_windows[i] = (i + 1) + (arr_l[i] - 1)\n else:\n max_windows[i] = (arr_l[i] - 1) + (arr_r[i] - 1) + 1\n return max_windows",
"def findPeakElement(nums):\n \"\"\"\n # 暴力\n nums.insert(0, -float('inf'))\n nums.append(-float('inf'))\n n = len(nums)\n for i in range(1, n - 1):\n if nums[i - 1] < nums[i] > nums[i + 1]:\n return i - 1\n \"\"\"\n # 二分查找\n l, r = 0, len(nums) - 1\n while l < r:\n m = l + (r - l) // 2\n if nums[m] > nums[m+1]:\n r = m\n else:\n l = m + 1\n return l",
"def calculate_largest_rectangle(heights):\n i = 1\n stack_of_indices = [0]\n max_area = 0\n start_index, end_index = 0, 0\n while i < len(heights):\n curr_height = heights[i]\n prev_height = heights[stack_of_indices[-1]]\n if curr_height > prev_height: # should be > or >= ???? Looks both OK, but IMO, > is more efficient\n stack_of_indices.append(i)\n else:\n while stack_of_indices and curr_height < heights[stack_of_indices[-1]]:\n index = stack_of_indices.pop()\n height = heights[index]\n if len(stack_of_indices) == 0:\n width = i\n else:\n next_index_from_stack = stack_of_indices[-1]\n width = i - next_index_from_stack - 1\n area = height * width\n if area > max_area:\n max_area = area\n if stack_of_indices:\n start_index = stack_of_indices[-1] + 1\n else:\n start_index = 0\n end_index = i - 1\n stack_of_indices.append(i)\n i += 1\n\n if stack_of_indices:\n # Remember index i = len(heights)\n while stack_of_indices:\n index = stack_of_indices.pop()\n height = heights[index]\n if len(stack_of_indices) == 0:\n width = i\n else:\n next_index_from_stack = stack_of_indices[-1]\n width = i - next_index_from_stack - 1\n area = height * width\n if area > max_area:\n max_area = area\n if stack_of_indices:\n start_index = stack_of_indices[-1] + 1\n else:\n start_index = 0\n end_index = i - 1\n\n return max_area, start_index, end_index",
"def part2(depths: List[int]) -> int:\n window_1 = sum(depths[:3])\n window_2 = sum(depths[1:4])\n count = 1 if window_2 > window_1 else 0\n end = 4\n while end < len(depths):\n window_1 += depths[end - 1] - depths[end - 4]\n window_2 += depths[end] - depths[end - 3]\n if window_2 > window_1:\n count += 1\n end += 1\n return count",
"def maximumGap(nums):\n \"\"\"\n # 先实现,强行sort后找最大间距 72%\n nums.sort()\n ans = 0\n for i in range(1, len(nums)):\n ans = max(nums[i] - nums[i - 1], ans)\n return ans\n \"\"\"\n # 桶排序,结果就是每个非空桶之间的间距最大值 O(n)\n if len(nums) < 2:\n return 0\n Buckets = [Bucket() for _ in range(len(nums)+1)]\n max_num = max(nums)\n min_num = min(nums)\n if max_num == min_num:\n return 0\n for num in nums:\n index = int((num-min_num)/(max_num-min_num)*len(nums))\n bucket = Buckets[index]\n if bucket.isempty:\n bucket.m = num\n bucket.M = num\n bucket.isempty = False\n else:\n bucket.m = min(num,bucket.m)\n bucket.M = max(num,bucket.M)\n res = 0\n pre_max = Buckets[0].M\n for i in range(1,len(Buckets)):\n if not Buckets[i].isempty:\n res = max(res,Buckets[i].m - pre_max)\n pre_max = Buckets[i].M\n return res",
"def minSubArrayLen(target: int, nums: List[int]) -> int:\n assert 1 <= target <= 10 ** 9, \"Target sum must be between 1 and 10 ** 9\"\n assert 1 <= len(nums) <= 105, \"Length of nums must be between 1 and 105\"\n for num in nums:\n assert 1 <= num <= 10 ** 4, \"Number in nums must be between 1 and 10 ** 4\"\n\n minLength = float('inf')\n leftPointer = 0\n sums = 0\n for index in range(len(nums)):\n sums += nums[index]\n\n while sums >= target:\n minLength = min(minLength, index - leftPointer + 1)\n sums -= nums[leftPointer]\n leftPointer += 1\n\n return minLength if minLength != float('inf') else 0",
"def split_array(nums, m):\n\n def split(max_sum):\n\n count = 0\n num_sum = 0\n for i, n in enumerate(nums):\n\n if num_sum + n < max_sum:\n num_sum += n\n\n else:\n count += 1\n num_sum = n\n\n return count\n\n low = max(nums)\n high = sum(nums)\n\n i = 0\n while high - low > 1:\n max_sum = (low + high + 1) // 2\n\n count = split(max_sum)\n if count < m:\n high = max_sum\n\n else:\n low = max_sum + 1\n\n i += 1\n if i > 9995:\n print(low, high)\n if i > 10000:\n raise Exception(\"Took too long\")\n\n return high - 1",
"def get_next_width(current_width,width_array):\n active_width = float(current_width)/MaxWidth\n\n active_width_constant = width_array.index(get_width_constant(active_width,width_array))\n\n width_multiplier = width_array[(active_width_constant+1)%len(width_array)]\n\n return int((MaxWidth-(WinBorder*2))*width_multiplier)",
"def largest_power_sizes(power):\n\tmaxes = []\n\tmax_ind = []\n\tfor size in range(1,301):\n\t\tind, mxx = largest_power(power, size)\n\t\tmaxes.append(mxx)\n\t\tmax_ind.append(ind)\n\treturn np.argmax(maxes)+1, max_ind[np.argmax(maxes)], np.max(maxes)",
"def _heapify_max(x):\n n = len(x)\n for i in reversed(range(n//2)):\n _siftup_max(x, i)",
"def limit_stack(runs: list, lst: list) -> None:\n if len(runs) >= 3:\n\n c, b, a = runs[-1][1] - runs[-1][0], \\\n runs[-2][1] - runs[-2][0], \\\n runs[-3][1] - runs[-3][0]\n\n while (b <= c or a <= b + c) and len(runs) > 1:\n if b <= c:\n _merge2(lst, runs[-2][0], runs[-2][1], runs[-1][1])\n runs[-2] = (runs[-2][0], runs[-1][1])\n runs.pop()\n\n # now there are 2 runs, when starting with 3\n # need to figure out what to do when there are 2, as there will\n # be an index error below\n if len(runs) >= 3:\n c, b, a = runs[-1][1] - runs[-1][0], \\\n runs[-2][1] - runs[-2][0], \\\n runs[-3][1] - runs[-3][0]\n else:\n # merge so len(run) == 1, ending while loop\n _merge2(lst, runs[-2][0], runs[-2][1], runs[-1][1])\n runs[-2] = (runs[-2][0], runs[-1][1])\n runs.pop()\n # doesn't matter what c, b, a are since len(runs) == 1\n\n if a <= b + c and len(runs) > 1:\n if a < c:\n _merge2(lst, runs[-3][0], runs[-3][1], runs[-2][1])\n runs[-3] = (runs[-3][0], runs[-2][1])\n del(runs[-2])\n\n else:\n _merge2(lst, runs[-2][0], runs[-2][1], runs[-1][1])\n runs[-2] = (runs[-2][0], runs[-1][1])\n runs.pop()\n\n if len(runs) >= 3:\n c, b, a = runs[-1][1] - runs[-1][0], \\\n runs[-2][1] - runs[-2][0], \\\n runs[-3][1] - runs[-3][0]\n else:\n # merge so len(run) == 1, ending while loop\n _merge2(lst, runs[-2][0], runs[-2][1], runs[-1][1])\n runs[-2] = (runs[-2][0], runs[-1][1])\n runs.pop()\n # doesn't matter what c, b, a are since len(runs) == 1\n\n\n # c, b, a = runs.pop(), runs.pop(), runs.pop()\n # if len(b) > len(c) and len(a) > len(b) + len(c):\n # runs.append(a)\n # runs.append(b)\n # runs.append(c)\n # # returns runs to original state\n # else:\n # while len(b) <= len(c) or len(a) <= len(b) + len(c):\n # if len(b) <= len(c):\n # _merge2(lst, b[0], b[1], c[1])\n # d = (b[0], c[1])\n # runs.append(a)\n # runs.append(d)\n # elif len(a) <= len(b) + len(c):\n # if len(a) < len(c):\n # _merge2(lst, a[0], a[1], b[1])\n # d = (a[0], b[1])\n #\n # runs.append(d)\n # else:\n # _merge2(lst, b[0], b[1], c[1])\n # d = (b[0], c[1])\n # runs.append(d)",
"def min_width(blocks):\r\n assert(len(blocks) > 0)\r\n return sum(blocks) + len(blocks) - 1",
"def step_width(right, left):\n\tstep_width = []\n\tfor i in range(len(right)):\n\n\t\twidth = abs(right[i][1] - left[i][1])\n\t\tstep_width.append(width)\n\n\treturn step_width",
"def calculate_min_max_tiles(self):",
"def splitArray(self, nums: List[int], m: int) -> int:\n l = max(nums)\n r = sum(nums)\n ans = r\n\n while l <= r:\n mid = (l + r) // 2\n range_sum = 0\n range_sum_count = 1\n for i in range(len(nums)):\n if (range_sum + nums[i] > mid):\n range_sum = nums[i]\n range_sum_count += 1\n else:\n range_sum += nums[i]\n if range_sum_count <= m:\n ans = min(ans, mid)\n r = mid - 1\n else:\n l = mid + 1\n return ans",
"def build_max_heap(A):\r\n i = int((len(A)-2)//2)\r\n while i >= 0:\r\n max_heapify(A, i)\r\n i -= 1\r\n return A"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input (instance of the BenchInput tuple), constructs and validates a disjunctive Chaum-Pedersen proof, returning the time (in seconds) to do each operation. | def chaum_pedersen_bench(bi: BenchInput) -> Tuple[float, float]:
(keypair, r, s) = bi
ciphertext = get_optional(elgamal_encrypt(0, r, keypair.public_key))
start1 = timer()
proof = make_disjunctive_chaum_pedersen_zero(
ciphertext, r, keypair.public_key, ONE_MOD_Q, s
)
end1 = timer()
valid = proof.is_valid(ciphertext, keypair.public_key, ONE_MOD_Q)
end2 = timer()
if not valid:
raise Exception("Wasn't expecting an invalid proof during a benchmark!")
return end1 - start1, end2 - end1 | [
"def dpTime():\n print \"calculating...\"\n startTime = time.time()\n dpa = dpAdvisor(subjects, 20)\n endTime = time.time()\n #printSubjects(dpa)\n print \"%.4f\" % (endTime-startTime)",
"def dpTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 50\n answer = dpAdvisor(subjects, maxWork)\n end_time = time.time()\n printSubjects(answer)\n print 'Time taken: ', end_time - start_time\n return None",
"def process_input(input: dict = None) -> tuple:\n load = input[\"load\"]\n co2_cost = input[\"fuels\"][\"co2(euro/ton)\"]\n gas_cost = input[\"fuels\"][\"gas(euro/MWh)\"]\n wind = input[\"fuels\"][\"wind(%)\"] / 100.0\n kerosine_cost = input[\"fuels\"][\"kerosine(euro/MWh)\"]\n powerplants = input[\"powerplants\"]\n\n # Reducing powers on p_min to proceed with restrictions in form 0 ≤ p' ≤ p'_max; p' = p - p_min\n\n powers = [\n p[\"efficiency\"] * (p[\"pmax\"] - p[\"pmin\"])\n if p[\"type\"] in [\"gasfired\", \"turbojet\"]\n else wind * (p[\"pmax\"] - p[\"pmin\"])\n for p in powerplants\n ]\n\n # Reducing initial load according to the new restrictions\n\n load -= sum(\n [\n p[\"efficiency\"] * p[\"pmin\"]\n if p[\"type\"] in [\"gasfired\", \"turbojet\"]\n else wind * p[\"pmin\"]\n for p in powerplants\n ]\n )\n names = [p[\"name\"] for p in powerplants]\n\n # Defining costs of the fuels of each powerplant\n\n costs = []\n for i, powerplant in enumerate(powerplants):\n if powerplant[\"type\"] == \"gasfired\":\n # For gasfired powerplants costs depend on gas and CO_2 costs\n costs.append(gas_cost / powerplant[\"efficiency\"] + 0.3 * co2_cost)\n elif powerplant[\"type\"] == \"turbojet\":\n # For turbojets costs depend on kerosin costs\n costs.append(kerosine_cost / powerplant[\"efficiency\"])\n else:\n # For wind turbines costs are 0\n costs.append(0)\n return powers, costs, load, names",
"def run_timings():\n\n running_times = 0\n run_count = 0\n\n while recorded_time := input(f\"Enter your 10k time: \"):\n if not recorded_time:\n break\n running_times += float(recorded_time)\n run_count += 1\n\n return running_times / run_count",
"def _calcExecTime(self, migTask, dPrime):\n #print \"ae\", self\n # Let's start making U = 0.9999 (which probably causes deadline misses).\n # If we force U = 1, we won't be able to use La.\n if self.util() >= 0.9999:\n self._lastCost = 0.0\n return 0.0\n cPrime = (0.9999 - self.util())*migTask.period()\n\n # Temporarily add the slice\n tempSlice = WmSlice(-1, cPrime, dPrime, migTask)\n self._addSlice(tempSlice)\n\n L = self._L()\n min_d = self._minDeadline()\n\n #print \"L\", L\n #print self\n #print \"Calculating cost. dPrime\", dPrime\n\n # QPA\n t = self._lastDeadline(L)\n h = self._h(t)\n #print t\n while round(t,12) >= round(min_d,12): # We are checking demand only for the migratory task\n # We round the checking to 12 decimal places. Otherwise, it could make the algorithm repeat undefinedly, in\n # case new calculated cost is not 100% precise. We do the same when applying floor(). The other comparisons don't\n # need this correction, since they are not so critical.\n if round(h,12) > round(t,12):\n #print \"HIGH. t %.15f\" % t, \"h(t) %.15f\" % h, \". C was\", cPrime\n cPrime = (t - self._h_oth(t, tempSlice)) / floor(round((t + migTask.period() - dPrime)/migTask.period(), 12))\n #print \"New C is\", cPrime\n tempSlice._wcet = cPrime # Update slice cost to fix demand\n\n if cPrime <= 0.0: # Stop if the cost gets negative\n self._removeLastSlice()\n self._lastCost = 0.0\n return 0.0\n\n #print \"OK. t\", t, \"h(t)\",h, \"new t\",\n t = self._lastDeadline(t)\n #print t\n h = self._h(t)\n #print \"OK. t\", t, \"h(t)\",h\n\n #print self\n #print \"Final cost\", cPrime\n #if not self._qpa():\n # print self.tasks()\n #assert self._qpa()\n\n self._removeLastSlice()\n self._lastCost = cPrime\n return cPrime",
"def calculateWaitingTime(self, inputs):\n CollisionCounter.CollisionCounter.getInstance().waitingTimeCalculated(self.time)\n timeUntilDepature = self.getAtt('departure_time', inputs) - self.time\n remainingLoadingTime = self.calculateLoadingTime(inputs)\n # calculates first maximum possible waiting time\n sampleTime = int((timeUntilDepature - remainingLoadingTime) / self.participants)\n\n if sampleTime >= 1:\n # result is big enough for a standard treatment\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(sampleTime + 1)\n elif sampleTime < 1:\n # reslut is too small, special treatment necessary\n upperLimit = (10 * (1 - (math.exp(sampleTime - 1)))) + 1\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(max((min(upperLimit,\n timeUntilDepature)) + 1, 1))\n # decides whether charging is allowed during waiting time\n if not self.stayedConnected:\n self.stayConnected = True\n self.stayedConnected = True\n else:\n self.stayConnected = False\n self.stayedConnected = False",
"def timeThem(*args, **kwargs):\n\n funcs = []\n funcArgs = list(args[:])\n \n #filter arguments\n for arg in args:\n if callable(arg):\n funcs.append(arg)\n funcArgs.remove(arg)\n \n key = \"inNumber\"\n inNumber=10\n if key in kwargs:\n inNumber = kwargs[key]\n del kwargs[key]\n\n durations = []\n refTime = 0.0\n\n for func in funcs:\n retVal = func(*funcArgs, **kwargs)\n duration = timeit(partial(func, *funcArgs, **kwargs), number=inNumber)\n \n comparison = \"\"\n if refTime <= 0.0:\n refTime = duration\n else:\n comparison = \" ( *{:.2f})\".format(duration / refTime)\n \n print(\"{: <16} : {:.4f}\".format(func.__name__, duration) + comparison + \" returns '{}' ({})\".format(retVal, type(retVal).__name__))\n durations.append(duration)\n \n return durations",
"def enter_data_for_time_calc():\n print(\"Pace & Distance -> Time\")\n print(\"=\" * 50)\n\n pace = input(\"Pace[min/km]: \")\n distance = float(input(\"Distance[km]: \"))\n\n calc_time(pace, distance)",
"def benchmark(func, inputs):\n t0 = time.clock()\n results = [func(x) for x in inputs]\n t1 = time.clock()\n average_time = (t1 - t0) / len(inputs)\n return average_time, results",
"def run_timings():\n\n running_times = []\n\n while recorded_time := input(f\"Enter your 10k time: \"):\n if not recorded_time:\n break\n running_times.append(float(recorded_time))\n average_pace = sum(running_times) / len(running_times)\n return average_pace",
"def bruteForceTime():\n\n # TODO...\n\n advisorTime(bruteForceAdvisor)",
"def staleness(T, clock) -> float:\n return clock - T",
"def evaluate(self, time) -> float:\n ...",
"def dpTime():\n # TODO...\n subjects = loadSubjects(SUBJECT_FILENAME)\n # print \"subjects:\", subjects\n test_maxWork = [30]\n for each in test_maxWork:\n print \"maximum workload\", each, '\\n',\n startTime = time.time()\n selected = dpAdvisor(subjects, each)\n endTime = time.time()\n print endTime - startTime, \"seconds.\"\n printSubjects(selected)",
"def run_timing():\n\n total_time = 0\n num_runs = 0\n msg = 'Enter 10 km run time (min): '\n\n while one_run_time := convert_input(input(msg), 'float', True):\n total_time += one_run_time\n num_runs += 1\n \n if num_runs == 0:\n print(\"Don't be lazy, go for a run!)\")\n return\n\n avg_time = total_time / num_runs\n\n print(f'Average of {avg_time:.2f} minutes, over {num_runs} runs')",
"def _get_timings_perinput(funcs, input_=None):\n\n global _TIMEOUT\n global _NUM_REPEATS\n\n timings_l = []\n\n from IPython import get_ipython\n if get_ipython() is None:\n iter_funcs = trange(len(funcs), desc='Loop functions', leave=False)\n else:\n iter_funcs = range(len(funcs))\n\n for j in iter_funcs:\n f = funcs[j]\n ii = 1\n process_next = True\n while process_next:\n for jj in 1, 2, 5:\n iter_rep = ii * jj\n if input_ is None:\n t = min(timeit.repeat(functools.partial(f), repeat=_NUM_REPEATS, number=iter_rep))\n else:\n t = min(timeit.repeat(functools.partial(f, *input_), repeat=_NUM_REPEATS, number=iter_rep))\n if t > _TIMEOUT:\n process_next = False\n break\n ii *= 10\n timings_l.append(t / iter_rep)\n return timings_l",
"def bruteForceTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 8\n answer = bruteForceAdvisor(subjects, maxWork)\n end_time = time.time()\n printSubjects(answer)\n print 'Time taken: ', end_time - start_time\n return None",
"def typingSpeedCalc(self):\n output = []\n index = 0\n for chunk in self.chunks:\n initialChunk = chunk[0]\n data = [x.strip() for x in initialChunk.split(',')]\n if data[1].isnumeric():\n userTimestamp = int(data[1])\n else:\n continue\n initialTime = userTimestamp\n\n finalChunk = chunk[-1]\n data = [x.strip() for x in finalChunk.split(',')]\n if data[1].isnumeric():\n userTimestamp = int(data[1])\n else:\n continue\n finalTime = userTimestamp\n\n chunkPair = len(chunk)\n\n density = 0\n duration = (finalTime - initialTime) / 1000\n if chunkPair > 1:\n density = chunkPair / duration\n density = round(density, 3)\n output.append(density)\n index += 1\n return output",
"def test_time():\n for N in range(8,20):\n _ ,time = solveN(N)\n if(time>600):\n print(\"Test don't passed at N={N} should be less than 10 min Taken:{time}\")\n break\n print(f\"Time Test passed for N = {N} {time:.2f}s taken\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test of the function that decides whether log rotation is needed | def test_need_to_rotate_log(self):
self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')
self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')
self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')
self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size') | [
"def _should_rotate_log(self, handler):\n if handler[\"rotate_log\"]:\n rotate_time_index = handler.get(\"rotate_log_index\", \"day\")\n try:\n rotate_time_index = self._decode_time_rotation_index(rotate_time_index)\n except ValueError:\n rotate_time_index = 2\n\n rotate_time_delta = handler.get(\"rotate_log_delta\", 1)\n\n cur_t = time.gmtime()\n first_different_index = 9\n for i in range(9):\n if cur_t[i] != handler[\"log_rot_time\"][i]:\n first_different_index = i\n break\n\n if first_different_index < rotate_time_index:\n # If the time deltas differ by a time step greater than what we\n # have set for the rotation (I.e., months instead of days) we will\n # automatically rotate.\n return True\n else:\n time_delta = (\n cur_t[rotate_time_index]\n - handler[\"log_rot_time\"][rotate_time_index]\n )\n return time_delta >= rotate_time_delta\n\n return False",
"def _is_time_to_log(self):\n log_frec = self.logging_frecuency\n return (log_frec > 0 and\n ((self.total_steps % log_frec != 0 and\n self.step == self.total_steps - 1)\n or self.step % log_frec == log_frec - 1))",
"def log2(a):",
"def test_rotated(self):\n self._calibration_test(\"rotated\")",
"def _handle_log_rotations(self):\n for h in self.capture_handlers:\n if self._should_rotate_log(h):\n self._rotate_log(h)",
"def log(a , b):\n return math.log(a,b)",
"def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) - np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik",
"def log2(x):\n ...",
"def log(x):\r\n\r\n return math.log(x)",
"def log_cust(x):\n if type(x) != str:\n if x < 0:\n return 0\n elif x == 0:\n return 0\n elif x > 0:\n return np.log(x)",
"def _compute_log_value(self):",
"def rotation_mode():\r\n pass",
"def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))",
"def test_logistic():\n r=np.random.normal(size=20)\n assert np.isclose( ilogistic(logistic(r)),r ).all()",
"def add_log_if_improves_skew(feature, df) :\r\n featureData = df[feature] \r\n logged = np.log(featureData)\r\n if abs(logged.skew()) >= abs(featureData.skew()) :\r\n return False\r\n df[feature+\"_log\"] = logged\r\n return True",
"def logarithmic_parametrization(self) -> bool:\n return self._logarithmic_parametrization",
"def test_get_log(self):\n result = log_lib.get_log(True)\n self.assertTrue(callable(result))\n result(\"dummy-message\")\n\n result = log_lib.get_log(False)\n self.assertTrue(callable(result))\n result(\"dummy-message\")",
"def ilog(x,delta):\n if(delta < x and x < 1.0 - delta):\n return np.log( -np.log(x) )\n elif(x < delta):\n return np.log( -np.log(delta) )\n else: \n return np.log( -np.log(1.0 - delta) )",
"def HasRotated(logfile, hash):\n timestamp = utcnow()\n cursor.execute('''SELECT hash, date FROM rotate\n WHERE logfile = \"%s\"''' % (logfile,))\n result = cursor.fetchone()\n # If the database doesn't have an entry for our logfile then we need to\n # create one for it using the passed logfile hash and the current\n # timestamp.\n if not result:\n print \"New logfile, adding hash and date.\"\n cursor.execute('''INSERT INTO rotate (logfile, hash, date)\n VALUES (\"%s\", \"%s\", \"%s\")''' % (logfile, hash, timestamp))\n con.commit()\n return timestamp\n if result[0] == hash:\n # The current logfile hash matches the recorded one at last rotation,\n # we just return the old timestamp.\n return result[1]\n # If we get here, the logfile hash is different, indicating that rotation\n # has occured. We therefore set and return a new timestamp.\n print logfile, \"has rotated\"\n cursor.execute('''UPDATE rotate SET hash = \"%s\", date = \"%s\"\n WHERE logfile = \"%s\"''' % (hash, timestamp, logfile))\n con.commit()\n return timestamp"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test of attempting log rotation with compress set in the configuration | def test_process_log_with_compress_in_configuration(self):
with tempfile.TemporaryDirectory() as sandbox:
with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout:
srcfile = Path(sandbox, 'pokus.log')
srcfile.touch()
destfile = Path(sandbox, 'backup', 'pokus.log')
compressors = process_log(
datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30),
{
'target': '{{path}}/backup/{{name}}.{{ext}}',
'interval': 'hourly',
'compress': 'gzip -9'
},
'hourly',
str(srcfile),
10
)
self.assertEqual(compressors, [[sandbox, 'gzip', '-9', str(destfile)]])
self.assertFalse(srcfile.exists())
self.assertTrue(destfile.exists())
self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=destfile)) | [
"def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n # make sure we normally go outside the range\n self.assertGreater(np.sum(M1.out < mrate), 0)\n self.assertGreater(np.sum(M1.out > Mrate), 0)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n self.assertEqual(np.sum(M2.out < mrate), 0)\n self.assertEqual(np.sum(M2.out > Mrate), 0)",
"def test_image_rotate(self):\n self.image_rotate.rotate(90)\n self.assertEqual(str(self.image_rotate.size()), str((1024, 1280)))",
"def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))",
"def test_auto_compression():\n with dask.config.set({\"test123\": \"auto\"}):\n try:\n import lz4 # noqa: F401\n\n assert get_compression_settings(\"test123\") == \"lz4\"\n return\n except ImportError:\n pass\n\n try:\n import snappy # noqa: F401\n\n assert get_compression_settings(\"test123\") == \"snappy\"\n except ImportError:\n assert get_compression_settings(\"test123\") is None",
"def test_compression_tanh(self):\n tau = 48.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)\n\n M1 = simulation.StateMonitor(self.rule, 'out')\n\n sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)\n sim1.run(tmax)\n\n self.rule.compress_rates = True\n\n M2 = simulation.StateMonitor(self.rule, 'out')\n\n sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)\n sim2.run(tmax)\n\n mavg = 0.5*(mrate + Mrate)\n mdiff = 0.5*(Mrate - mrate)\n\n expected = mavg + mdiff*np.tanh((M1.out - mavg)/mdiff)\n\n self.assertTrue(np.allclose(M2.out, expected), msg=\n \"mean(abs(out - expected))={}\".format(np.mean(np.abs(M2.out - expected))))",
"def test_compress(self):\n self.logger.info(\"STEP: Create the workspace directory to be compressed.\")\n workspace = Workspace(Mock)\n directory = Path.cwd().joinpath(\"workspace\")\n directory.mkdir()\n workspace.workspace = directory\n\n # Create a file to verify compression.\n directory.joinpath(\"file.txt\").touch()\n\n test_folder = Path.cwd().joinpath(\"testfolder\")\n test_folder.mkdir()\n self.items.append(test_folder)\n\n self.logger.info(\"STEP: Compress the directory.\")\n workspace.compress()\n\n self.logger.info(\n \"STEP: Verify that the directory was compressed using the gztar format.\"\n )\n self.items.append(test_folder)\n compressed_workspace = Path.cwd().joinpath(\"workspace.tar.gz\")\n unpack_archive(compressed_workspace, test_folder, format=\"gztar\")\n compressed_file = test_folder.joinpath(\"workspace/file.txt\")\n self.assertTrue(compressed_file.exists() and compressed_file.is_file())",
"def test_compress_deterministic(self):\n\n class DeterministicGZipMiddleware(GZipMiddleware):\n max_random_bytes = 0\n\n r1 = DeterministicGZipMiddleware(self.get_response)(self.req)\n r2 = DeterministicGZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r1.content, r2.content)\n self.assertEqual(self.get_mtime(r1.content), 0)\n self.assertEqual(self.get_mtime(r2.content), 0)",
"def test_backup_with_compress_flag(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backupset.backup_compressed = False\n self.backup_cluster()\n no_compression = self.get_database_file_info()\n self.log.info(\"\\nDelete old backup and do backup again with compress flag\")\n self.backup_create()\n self.backupset.backup_compressed = self.input.param(\"backup-compressed\", False)\n self.backup_cluster()\n with_compression = self.get_database_file_info()\n self.validate_backup_compressed_file(no_compression, with_compression)",
"def test_compressed_artifacts_valid(self):\n with Lock(MONGO_LOCK_FILE) as l:\n compressions = [\"gzip\", \"lzma\"]\n for comp in compressions:\n artifact_name = str(uuid4())\n description = \"description for foo \" + artifact_name\n device_type = \"project-\" + str(uuid4())\n data = b\"foo_bar\"\n\n with artifact_rootfs_from_data(\n name=artifact_name,\n data=data,\n devicetype=device_type,\n compression=comp,\n ) as art:\n self.ac.log.info(\n \"uploading artifact (compression: {})\".format(comp)\n )\n self.ac.add_artifact(description, art.size, art)\n l.unlock()",
"def _check_rotated_filename_candidates(self):\n # savelog(8)\n candidate = \"%s.0\" % self.filename\n if (exists(candidate) and exists(\"%s.1.gz\" % self.filename) and\n (stat(candidate).st_mtime > stat(\"%s.1.gz\" % self.filename).st_mtime)):\n return candidate\n\n # logrotate(8)\n # with delaycompress\n candidate = \"%s.1\" % self.filename\n if exists(candidate):\n return candidate\n\n # without delaycompress\n candidate = \"%s.1.gz\" % self.filename\n if exists(candidate):\n return candidate\n\n rotated_filename_patterns = (\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # for TimedRotatingFileHandler\n \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\",\n )\n for rotated_filename_pattern in rotated_filename_patterns:\n candidates = glob.glob(self.filename + rotated_filename_pattern)\n if candidates:\n candidates.sort()\n return candidates[-1] # return most recent\n\n # no match\n return None",
"def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)",
"def test_logrotate(host):\n _f_logrotate_cnt = host.file(\"/etc/logrotate.conf\").content_string\n _regex = re.compile(\"compresscmd /usr/bin/xz\")\n\n assert re.findall(_regex, _f_logrotate_cnt)",
"def test_compression(self):\n save_netcdf(self.cube, self.filepath)\n\n data = Dataset(self.filepath, mode=\"r\")\n filters = data.variables[\"air_temperature\"].filters()\n\n self.assertTrue(filters[\"zlib\"])\n self.assertEqual(filters[\"complevel\"], 1)",
"def test_rotate_auth(self):\n pass",
"def test_rotated(self):\n self._calibration_test(\"rotated\")",
"def validate_compression(self, mode, image_spec, **kw):\n config = kw.get(\"config\")\n io = config.get(\"io_total\", \"1G\")\n kw[\"io\"] = io\n kw[\"imagespec\"] = image_spec\n pool_name = image_spec.split(\"/\")[0]\n\n # running io to check compression of data\n self.rbd_bench(**kw)\n\n out = self.exec_cmd(cmd=\"ceph df detail --format json-pretty\", output=True)\n output = json.loads(out.rstrip())\n pool_info = output[\"pools\"]\n\n # Iterate over the pools to get compressed data information\n for pool in pool_info:\n if pool[\"name\"] == pool_name:\n compressed_data = pool[\"stats\"][\"compress_bytes_used\"]\n break\n log.debug(\n f\"Pool statistics refer compress_bytes_used for bytes compressed: {pool['stats']}\"\n )\n\n if mode == \"compressible\":\n if compressed_data == 0:\n log.error(\"Test Failed: Data did not get compressed in compressible mode\")\n return 1\n else:\n log.info(\n f\"Test Passed: Data got compressed, bytes compressed: {compressed_data}\"\n )\n return 0\n\n elif mode == \"incompressible\":\n if compressed_data != 0:\n log.error(\"Test Failed: Data gets compressed in incompressible mode\")\n return 1\n else:\n log.info(\n \"Test Passed: Data did not get compressed due to incompressible mode set\"\n )\n return 0\n else:\n log.error(f\"Invalid mode: {mode}\")\n return 1",
"def test_compress_result():\n import itertools\n\n pg = pygarv.PyGarv(mkh5_f=no_pg_h5)\n\n # degenerate dblock len == 0, not typical\n result = np.zeros(shape=(0,), dtype=bool)\n fails = pg._compress_result(result)\n assert fails == []\n\n # smoke test all combinations of T,F up to len 8\n # eyeball check of singletons, runs in any\n # position appears ok\n for n in range(9):\n results = list(itertools.product([0, 1], repeat=n))\n for r in results:\n result = np.array(r)\n fails = pg._compress_result(result)\n # print(result)\n # print(fails)\n # print()\n\n cleanup_h5()",
"def perform_tests():\n print \"\\n****\\nTesting Doublecompress...\\n\"\n dc_pass = unit_doublecompress()\n if (dc_pass):\n result = 'PASS'\n else:\n result = 'FAIL'\n print \">>> \" + result\n\n return dc_pass",
"def get_compression(self):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_spec_config on empty conf | def test_get_spec_config_empty(self):
spec_conf = get_spec_config({}, '')
self.assertEqual(spec_conf, {}) | [
"def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})",
"def test_get_missing_param(self):\n Config.init(sample_conf)\n res = Config.get(\"foo\")\n self.assertEqual(res, None)",
"def get_config_spec(cls):\n return False",
"def test_get_spec_config_match(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'default_foo': 'default_bar',\n 'foo': 'bar'\n },\n 'specific': [\n {'mask': ['filenomatch'], 'foo': 'bar_nomatch'},\n {'mask': ['filematch'], 'foo': 'match'},\n {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'}\n ]\n }, 'filematch')\n self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})",
"def test_config_var_not_empty():\n assert app_deployer.config is not None",
"def test_get_with_empty_value(self):\n self.assertEqual(self.config.get('none_types','other_value'),None)\n self.assertEqual(self.config.get('none_types','other_value','something'),'something')",
"def test_GetString_with_empty_config_values(self):\n val = self.config.GetString('section.empty')\n self.assertEqual(val, None)",
"def test_config():\n return {}",
"def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG",
"def test_get_canary_config_using_get(self):\n pass",
"def test_get_config(oss_fuzz_benchmark):\n assert benchmark_config.get_config(conftest.OSS_FUZZ_BENCHMARK_NAME) == (\n conftest.OSS_FUZZ_BENCHMARK_CONFIG)",
"def test_get_canary_configs_using_get(self):\n pass",
"def test_no_default(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(self._no_default))\n # ConcurrentWorkers is the first value that is checked\n self.assertEqual(str(cm.exception),\n \"Config must contain ConcurrentWorkers\")",
"def test_no_valid_config1():\n cfg = gc3libs.config.Configuration()\n with pytest.raises(gc3libs.exceptions.NoConfigurationFile):\n cfg.load()",
"def test_read_invalid_confs(conf):\n with pytest.raises(gc3libs.exceptions.ConfigurationError):\n read_invalid_conf(conf[1])",
"def test_get_configuration_template(self):\n pass",
"def test_there_are_config_examples():\n assert examples_cfg_files",
"def test_validate_config_empty_config(self):\n\n sample_config = {}\n\n expected_config = {\n 'hosts': [],\n 'syncs': [],\n 'recursive': False,\n 'tags': [],\n }\n\n result = syncme.validate_config(sample_config)\n self.assertTrue(result)\n self.assertDictEqual(sample_config, expected_config)",
"def test_get_empty_list(self):\n self.assertEquals(Configuration.get_by_name_as_list(\"abc\"), [])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_spec_config on conf with defaults | def test_get_spec_config_defaults(self):
spec_conf = get_spec_config({
'defaults': {
'foo': 'bar'
}
}, '')
self.assertEqual(spec_conf, {'foo': 'bar'}) | [
"def test_get_spec_config_match(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'default_foo': 'default_bar',\n 'foo': 'bar'\n },\n 'specific': [\n {'mask': ['filenomatch'], 'foo': 'bar_nomatch'},\n {'mask': ['filematch'], 'foo': 'match'},\n {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'}\n ]\n }, 'filematch')\n self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})",
"def test_get_spec_config_empty(self):\n spec_conf = get_spec_config({}, '')\n self.assertEqual(spec_conf, {})",
"def test_defaults(self):\n self.assertEqual(self.config.CONFIG_NAME, 'value')",
"def test_config_fixture(self):\n self.conf.set_default(\"length_diff_percent\", 1000.0, group=\"test\")\n self.conf.set_default(\"time_diff_percent\", 1000.0, group=\"test\")\n self.conf.set_default(\"max_time\", 10, group=\"test\")\n self.conf.set_default(\"max_length\", 500, group=\"test\")",
"def test_config():\n return {}",
"def test_get_canary_config_using_get(self):\n pass",
"def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')",
"def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG",
"def test_config_reader_can_read_example_configs(example_config):\n read_config(example_config)",
"def test_get_missing_param(self):\n Config.init(sample_conf)\n res = Config.get(\"foo\")\n self.assertEqual(res, None)",
"def get_config_spec(cls):\n return False",
"def test_get_config(oss_fuzz_benchmark):\n assert benchmark_config.get_config(conftest.OSS_FUZZ_BENCHMARK_NAME) == (\n conftest.OSS_FUZZ_BENCHMARK_CONFIG)",
"def test_get_canary_configs_using_get(self):\n pass",
"def test_get_configuration_template(self):\n pass",
"def test_SpecConfig_class():\n res = SpecConfig(**SPEC_CONFIG)\n assert res.path_out == SPEC_CONFIG['path_out']",
"def test_default_config(sphinx_app_wrapper):\n sphinx_app = sphinx_app_wrapper.create_sphinx_app()\n cfg = sphinx_app.config\n assert cfg.project == \"Sphinx-Gallery <Tests>\"\n # no duplicate values allowed The config is present already\n with pytest.raises(ExtensionError) as excinfo:\n sphinx_app.add_config_value('sphinx_gallery_conf', 'x', True)\n assert 'already present' in str(excinfo.value)",
"def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def test_teams_id_builder_configs_default_get(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test get_spec_config on matching conf | def test_get_spec_config_match(self):
spec_conf = get_spec_config({
'defaults': {
'default_foo': 'default_bar',
'foo': 'bar'
},
'specific': [
{'mask': ['filenomatch'], 'foo': 'bar_nomatch'},
{'mask': ['filematch'], 'foo': 'match'},
{'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'}
]
}, 'filematch')
self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']}) | [
"def test_get_canary_configs_using_get(self):\n pass",
"def test_get_canary_config_using_get(self):\n pass",
"def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})",
"def test_get_spec_config_empty(self):\n spec_conf = get_spec_config({}, '')\n self.assertEqual(spec_conf, {})",
"def test_config_reader_can_read_example_configs(example_config):\n read_config(example_config)",
"def test_get_config(oss_fuzz_benchmark):\n assert benchmark_config.get_config(conftest.OSS_FUZZ_BENCHMARK_NAME) == (\n conftest.OSS_FUZZ_BENCHMARK_CONFIG)",
"def get_config_spec(cls):\n return False",
"def test_SpecConfig_class():\n res = SpecConfig(**SPEC_CONFIG)\n assert res.path_out == SPEC_CONFIG['path_out']",
"def test_get_configuration_template(self):\n pass",
"def test_there_are_config_examples():\n assert examples_cfg_files",
"def test_config():\n return {}",
"def test_config_read_sample(self):\n\n try:\n open(self.config.name)\n except FileNotFoundError:\n pytest.skip(\"config file not found\")\n\n config_obj = read_config(self.config)\n self.assertIsInstance(config_obj, Config)",
"def test_modelconfigurations_get(self):\n pass",
"def test_get_registration_configuration(self):\n pass",
"def test_config_get(self):\n self.assertEqual(redismod.config_get(\"*\"), \"A\")",
"def test_get_yaml_spec(self):\n pass",
"def test_compliance_configuration(self, evidence):\n evidence_config = json.loads(evidence.content)\n if evidence_config != self.config.raw_config:\n evidence = json.dumps(evidence_config, indent=2).split('\\n')\n config = json.dumps(self.config.raw_config, indent=2).split('\\n')\n self.add_failures(\n 'Differences found',\n {\n 'Fetcher Configuration': evidence,\n 'Check Configuration': config\n }\n )",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the given modifier name is a valid one. If not, raise an exception based on the violation. | def _isValidModifier(self, modifiers, modifierName):
if Modifiers.ILLEGAL_MODIFIER_PATTER.search(modifierName):
msg = ('Modifier named "{0}" in sheet {1} contains illegal characters. '
'Supported characters are a to z, A to Z, 0 to 9 and underscore "_". '
'Spaces are not allowed characters, use underscore instead. For example '
'"some_mod".'
).format(modifierName, MODIFIER_LIST_SHEET_NAME)
raise errors.UnsupportedCharacter(MODIFIER_LIST_SHEET_NAME, msg)
if modifierName in map(lambda mod: mod.name, modifiers):
msg = ('Modifier named "{0}" already exists in the sheet {1}. '
'Modifier names must be unique. To fix remove or rename '
'duplicates.'
).format(modifierName, MODIFIER_LIST_SHEET_NAME)
raise errors.DuplicateError(MODIFIER_LIST_SHEET_NAME, msg) | [
"def validate_name(name, reserved_names=()):",
"def check_name(name):\n if len(name) > WorkflowCRD.NAME_MAX_LENGTH:\n raise ValueError(\n \"Name is too long. Max length: {}, now: {}\"\n \"\".format(WorkflowCRD.NAME_MAX_LENGTH, len(name))\n )\n if \".\" in name:\n raise ValueError(\"Name cannot include dot.\")\n if \"_\" in name:\n raise ValueError(\"Name cannot include underscore.\")\n\n match_obj = re.match(WorkflowCRD.NAME_PATTERN, name)\n if not match_obj:\n raise ValueError(\n \"Name is invalid. Regex used for validation is %s\"\n % WorkflowCRD.NAME_PATTERN\n )",
"def raiseNameError(text):\n pattern = re.compile(\"[a-zA-Z]\")\n if not pattern.match(text):\n raise Exception(\"Invalid Name Entered\")",
"def is_valid_name_error(name: str) -> Optional[GraphQLError]:\n if not isinstance(name, str):\n raise TypeError(\"Expected name to be a string.\")\n if name.startswith(\"__\"):\n return GraphQLError(\n f\"Name {name!r} must not begin with '__',\"\n \" which is reserved by GraphQL introspection.\"\n )\n if not re_name.match(name):\n return GraphQLError(\n f\"Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but {name!r} does not.\"\n )\n return None",
"def _check_is_name_valid(self, name):\n if name in self.forbidden_names or name.endswith(\n self.forbidden_extensions) or self.__check_is_match_regex(name):\n return False\n return True",
"def _validate_mod(self, mod: Modifier):\r\n return not mod.name in self.mods",
"def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)",
"def checkName(name):\n name = name.encode('utf-8')\n if re.match(\"^[.A-Za-z0-9_-]*$\", name):\n return True\n else:\n raise ValueError(\n 'Invalid input name: ({}).'\n ' Name can only contain letters, numbers,'\n ' dots, underscores and dashes.'.format(name)\n )",
"def check_name(name):\n if not isinstance(name, str):\n raise TypeError('Donor Name must be a string.')\n if name == \"\":\n raise ValueError('Name is required for every donor.')",
"def validate_name(name: str) -> None:\n\n # Disallow empty.\n if not name:\n raise CleanError('Feature set name cannot be empty.')\n\n # Require starting with a letter.\n if not name[0].isalpha():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - names must start with a letter.'\n )\n\n # Require only letters, numbers, and underscores.\n if not name.replace('_', '').isalnum():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only letters, numbers, and underscores are allowed.'\n )\n\n # Require all lowercase.\n if not name.islower():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only lowercase letters are allowed.'\n )\n\n # Disallow leading, trailing, or consecutive underscores.\n # (these will result in a '' in the split results which evals to False)\n if not all(name.split('_')):\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - leading, trailing, and consecutive underscores are'\n ' not allowed.'\n )",
"def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2",
"def _validate_from_name(self, name):\n return name[:15]",
"def checkAttrName(self, node):\n name = node.attrname\n if name.startswith(\"_\") and name != \"_\":\n # Note: \"_\" *is* allowed.\n self.error(node, '\"%s\" is an invalid attribute name '\n 'because it starts with \"_\".' % name)\n if name.endswith('__roles__'):\n self.error(node, '\"%s\" is an invalid attribute name '\n 'because it ends with \"__roles__\".' % name)",
"def validateMemberName(n):\n try:\n if len(n) < 1:\n raise Exception('Name must be at least one byte in length')\n if len(n) > 255:\n raise Exception('Name exceeds maximum length of 255')\n if n[0].isdigit():\n raise Exception('Names may not begin with a digit')\n if mbr_re.search(n):\n raise Exception(\n 'Names contains a character outside the set [A-Za-z0-9_]')\n except Exception as e:\n raise MarshallingError(f'Invalid member name \"{n}\": {str(e)}')",
"def _check_score_name(score_name):\n\n error_checking.assert_is_string(score_name)\n if score_name in VALID_SCORE_NAMES:\n return\n\n error_string = (\n 'Valid scores (listed below) do not include \"{0:s}\":\\n{1:s}'\n ).format(score_name, str(VALID_SCORE_NAMES))\n\n raise ValueError(error_string)",
"def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')",
"def _check_attr_name(self, key):\n\n if not isinstance(key, basestring):\n raise TypeError(\"An attribute name must be a string.\")\n\n if not re.match('^[A-Za-z_]+[0-9A-Za-z_-]*$', key):\n\n raise NameException(\"Attribute name %s is invalid. \"\n \"Attribute names may not contain periods or \"\n \"comas.\" % key)",
"def check_workflow_name_regex_or_exit(workflow_name):\n if not re.match(r'^[A-Za-z0-9][A-Za-z0-9_\\-.]*$', workflow_name):\n raise WorkflowArchiverError(\"Workflow name contains special characters.\\n\"\n \"The allowed regular expression filter for workflow \"\n \"name is: ^[A-Za-z0-9][A-Za-z0-9_\\\\-.]*$\")",
"def test_validate_package_name_negative(self):\n with self.assertRaises(BadParameter):\n validate_package_name(\"incorrect-name\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if a given datetime.datetime is aware. | def is_aware(value):
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None | [
"def is_aware(value: datetime) -> bool:\n\n return value.utcoffset() is not None",
"def dt_is_aware(dtime):\n if dtime.tzinfo is not None and dtime.tzinfo.utcoffset(dtime) is not None:\n return True\n\n return False",
"def dt_is_aware(dt_value):\n return dt_value.tzinfo is not None and dt_value.tzinfo.utcoffset(dt_value) is not None",
"def test_tz_aware_date(self):\n naive_date = datetime.datetime.now()\n self.assertTrue(timezone.is_aware(tz_aware_date(naive_date)))\n aware_date = timezone.now()\n self.assertTrue(timezone.is_aware(tz_aware_date(aware_date)))",
"def ensure_aware(dt):\n if timezone.is_aware(dt):\n return dt\n return timezone.make_aware(dt)",
"def valid_datetime(dt):\n if isinstance(dt.tzinfo, tzinfo) and not datetime_ambiguous(dt):\n return True\n return False",
"def _datetime_ambiguous(dattim: dt.datetime) -> bool:\n assert dattim.tzinfo is not None\n opposite_fold = dattim.replace(fold=not dattim.fold)\n return _datetime_exists(dattim) and dattim.utcoffset() != opposite_fold.utcoffset()",
"def _datetime_exists(dattim: dt.datetime) -> bool:\n assert dattim.tzinfo is not None\n original_tzinfo = dattim.tzinfo\n # Check if we can round trip to UTC\n return dattim == dattim.astimezone(UTC).astimezone(original_tzinfo)",
"def is_datetime(self) -> bool:\n return False",
"def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes",
"def test_freeze_with_timezone_aware_datetime_in_non_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None\n assert utc_now == datetime.datetime(1970, 1, 1, 4)",
"def check_dt_consistency(date_dt):\n\n # https://en.wikipedia.org/wiki/Tz_database\n # https://www.iana.org/time-zones\n \n if date_dt.tzinfo is None:\n return True\n else:\n \n # This check is quite heavy but there is apparently no other way to do it.\n if date_dt.utcoffset() != dt_from_s(s_from_dt(date_dt), tz=date_dt.tzinfo).utcoffset():\n return False\n else:\n return True",
"def make_aware(value, timezone=None, is_dst=None):\n if timezone is None:\n timezone = tzlocal.get_localzone()\n if hasattr(timezone, \"localize\"):\n # This method is available for pytz time zones.\n return timezone.localize(value, is_dst=is_dst)\n else:\n # Check that we won't overwrite the timezone of an aware datetime.\n if is_aware(value):\n raise ValueError(\n \"make_aware expects a naive datetime, got %s\" % value\n )\n # This may be wrong around DST changes!\n return value.replace(tzinfo=timezone)",
"def is_datetime_like(val, conservative=False): # pylint: disable=unused-argument\n if conservative and val is None:\n return False\n if conservative:\n try:\n int(val)\n return False\n except (ValueError, TypeError):\n pass\n return _check_like(val, (np.datetime64, ), (datetime, ),\n is_valid_datetime64)",
"def make_aware(value: datetime, timezone=None, is_dst=None) -> datetime:\n\n if timezone is None:\n timezone = get_current_timezone()\n\n if hasattr(timezone, \"localize\"):\n # This method is available for pytz time zones.\n return timezone.localize(value, is_dst=is_dst)\n else:\n # Check that we won't overwrite the timezone of an aware datetime.\n if is_aware(value):\n raise ValueError(\"make_aware expects a naive datetime, got %s\" % value)\n # This may be wrong around DST changes!\n return value.replace(tzinfo=timezone)",
"def correct_datetime(record_datetime):\n assert record_datetime.date() == datetime.now(timezone.utc).date()",
"def check_datetime(self):\n\n date_now = str(datetime.datetime.today().date())[:16]\n dt_now = str(datetime.datetime.today())[:16]\n\n if dt_now in self.schedule.schedule[date_now].keys():\n logger.info(f'*find dt* | {dt_now}')\n return self.schedule.schedule[date_now][dt_now]\n\n return False",
"def _check_dt(dtime, utc_check=True):\n if not dt_is_aware(dtime):\n msg = 'Received a naive datetime object - check class input.'\n raise UtilityException(msg)\n\n if utc_check:\n if dtime.tzinfo is not pytz.UTC:\n msg = 'Got non utc tz {t} - use pypond.util.sanitize_dt()'.format(t=dtime.tzinfo)\n raise UtilityException(msg)",
"def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
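The aware-check just shown is easy to sanity-check; a minimal sketch assuming only Python's standard `datetime` module, with the function name taken from the snippet above:

```python
from datetime import datetime, timezone

# Naive datetime: tzinfo is None, so is_aware returns False.
print(is_aware(datetime.now()))              # False
# Aware datetime: tzinfo is set and yields a UTC offset, so is_aware returns True.
print(is_aware(datetime.now(timezone.utc)))  # True
```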
Define ZMQ connection and return socket to work with | def connect_to_worker():
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
return socket | [
"def get_connection(self):\n\n # Socket type DEALER is used in asynchronous request/reply patterns.\n # It prepends identity of the socket with each message.\n socket = self.zmq_context.socket(zmq.DEALER)\n socket.setsockopt(zmq.IDENTITY, self.identity)\n socket.connect('tcp://127.0.0.1:5010')\n return socket",
"def createSocket() -> zmq.Socket:\n\tcontext = zmq.Context()\n\tsocket = context.socket(zmq.SUB)\n\taddress = \"tcp://\" + Constants.SUB_HOST + \":\" + Constants.PORT\n\t\n\tsocket.connect(address)\n\tsocket.subscribe(Constants.TOPIC)\n\tsocket.subscribe(Constants.SEPARATOR_TOPIC)\n\t\n\treturn socket",
"def connect(self):\n try:\n self.zmq_context = zmq.Context()\n self.socket = self.zmq_context.socket(zmq.REQ)\n self.socket.connect(self.zmq_url)\n except Exception as ex:\n logger.error(\"Error establishing zmq channel: \" + str(ex))\n raise",
"def meta_trader_connector():\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(SOCKET_LOCAL_HOST)\n return socket",
"def _init_zmq_interface(self):\n if self._parsed_args.debug:\n self.logger.debug(\"**debug mode** no connection to manager\")\n return\n self._socket = self._context.socket(zmq.REQ)\n zmq_host = self.config[\"zmq\"][\"host\"]\n zmq_port = self.config[\"zmq\"][\"ports\"][\"workers\"]\n self._socket.setsockopt(zmq.TCP_KEEPALIVE, 1)\n self._socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 900)\n self._socket.connect(f\"tcp://{zmq_host}:{zmq_port}\")\n self.logger.info(f\"connected to {zmq_host} port {zmq_port}\")",
"def create_socket(self, host, port, socket_type):\n context = zmq.Context.instance()\n\n socket = context.socket(socket_type)\n\n socket.connect('tcp://{0}:{1}'.format(host, port))\n\n return socket",
"def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)",
"def zmq_make(context, url, linger=0):\n addr = urlparse(url)\n scheme, transport = (addr.scheme.split(\"+\", 2) + [\"tcp\"])[:2]\n kind, bind = schemes[scheme]\n logging.info(\"kind %s bind %s\", kind, bind)\n socket = context.socket(kind)\n socket.setsockopt(zmq.LINGER, linger)\n return socket",
"def setup_socket(self):\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind('tcp://*:5555')\n\n self.prev_raw_frame = None\n self.socket = socket",
"def zmq():\n global _server # pylint: disable=global-statement\n global port # pylint: disable=global-statement\n if(_server is None and mpi.is_zmqserver()):\n _server = ZmqServer(port)\n return _server",
"def connect_to_worker (self):\n context = zmq.Context ()\n print (\"Connecting to worker at %s responsible for casu #%d...\" % (self.wrk_addr, self.casu_number))\n socket = context.socket (zmq.REQ)\n socket.connect (self.wrk_addr)\n return socket",
"def setup(self):\n self.context = zmq.Context()\n self.sub_socket = self.context.socket(zmq.SUB)\n if self.filter:\n self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)\n self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))\n return self",
"def init_socket(self):\n self.context = zmq.Context.instance()\n self.socket = self.context.socket(zmq.SUB)\n\n self.socket.setsockopt(zmq.SUBSCRIBE, self.topic.encode(\"utf-8\"))\n self.socket.connect(self.sub_url)",
"def __init__(self, port=None):\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.PAIR)\n self.socket.bind(\"tcp://*:%s\" % port)",
"def _create_socket():\n sock = socket.socket()\n return sock",
"def build_socket(self, paradigm, topic, url):\n\n socket = None\n if paradigm == \"sub\":\n socket = self.context.socket(zmq.SUB)\n socket.connect(url)\n socket.setsockopt_string(zmq.SUBSCRIBE, topic)\n elif paradigm == \"pub\":\n socket = self.context.socket(zmq.PUB)\n socket.bind(url)\n elif paradigm == \"req\":\n socket = self.context.socket(zmq.REQ)\n socket.connect(url)\n elif paradigm == \"rep\":\n socket == self.context.socket(zmq.REP)\n socket.bind(url)\n else:\n raise Exception(\"Please provide a valid paradigm\")\n\n return socket",
"def _setup(type):\n global active\n if active is not None:\n close()\n assert active is None, \"Already setup, cannot setup again\"\n assert type == zmq.REQ or type == zmq.REP, \"Invalid type to setup\"\n active = context.socket(type)\n # Connect or bind depending on type\n if type == zmq.REQ:\n active.connect(CONNECTION_STRING)\n else:\n active.bind(CONNECTION_STRING)",
"def create_socket_test(self):\n context = Mock()\n p = stellr.pool.PoolManager(context)\n socket = Mock()\n context.socket.return_value = socket\n\n s = p._create_socket(ADDRESS)\n self.assertEqual(s, socket)\n context.socket.assert_called_once_with(zmq.REQ)\n socket.connect.assert_called_once_with(ADDRESS)",
"def init_connexion():\n connexion = socket(AF_INET, SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
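The `connect_to_worker` snippet above only covers the REQ side; a hedged counterpart sketch of a matching REP server bound on the same port (the echo behaviour is an illustrative assumption, not part of the original):

```python
import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")    # same endpoint the REQ socket connects to

while True:
    message = socket.recv()    # wait for a request from the REQ client
    socket.send(message)       # reply so the client's recv() can return
```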
Used to handle a non-responding ZMQ server | def raise_timeout(*args, **kwargs):
raise ZMQNotResponding('ZMQ server is not responding') | [
"def fix_zmq_exit():\n import zmq\n ctx = zmq.Context.instance()\n ctx.term()",
"def test_recv_nomsg(self):\n flag, msg_recv = self.recv_instance.recv(timeout=self.sleeptime)\n assert(not flag)\n nt.assert_equal(msg_recv, self.recv_instance.eof_msg)",
"def checkConnection(self,msg):\n if (len(msg) == 0):\n sleep(self.m_to/2)\n print >>sys.stderr, 'Closing due to possible server fault'\n self.close()",
"def test_keep_alive_cancelled(self):\n sleep(0.005) # Wait before a keep-alive message will be sent\n self.inverter.send(b\"\\x01\\x02\\x03\", b\"\") # Send something arbitrary\n self.sock.recv(4096) # Retrieve the sent message\n sleep(0.008) # Wait until just before the next keep-alive is supposed to happen\n # Check that no message was sent\n self.sock.setblocking(False)\n with self.assertRaises(BlockingIOError):\n self.sock.recv(4096)",
"def broker_null(self, data):\n\n print(\"Heartbeat\")\n #TODO: Reset heartbeat timer or something like that",
"def serverGoingDown(self):\n pass",
"def connectionLost(self, reason):\n print \"connection lost from\", self.addr\n reactor.stop()",
"def connectionLost(self, reason):\n print \"lost connection to\", host, \"port\", port\n reactor.stop()",
"def _handle_request_noblock(self):\n try:\n request, client_address = self.get_request()\n except OSError:\n return\n if self.verify_request(request, client_address):\n self.process_request(request, client_address)\n else:\n self.shutdown_request(request)",
"def start_server(self) -> None:\n with self.socket.bind(self.address):\n print(\"ZeroMQ Server listening at {}\".format(self.address))\n while True:\n payload_rx = self.socket.recv(flags=0)\n if payload_rx:\n self.decode_payload(payload_rx)\n self.socket.send_string(self.reply(), flags=0, copy=False)",
"def _onresponse(self, msg):\n reqid = msg['tunnel']\n if not self._pending_requests.has_key(reqid):\n return\n event, handler = self._pending_requests[reqid]\n if event != None:\n # leave result to be picked up by blocked client\n self._pending_requests[reqid] = msg['data']\n # notify\n event.set()\n elif handler != None:\n del self._pending_requests[reqid]\n handler(reqid, msg['data'])",
"def handle_shutdown(self):",
"def setup_reply():\n _setup(zmq.REP)",
"def on_ping_req_ack(self):\n self.state = \"alive\"\n self.owner.node_alive()",
"def assert_server_process_terminate_called(self):",
"def test_notifyNoConnectionNoisy(self):\n self.noisyAttemptMgr.notifyNoConnection(\"noisyRelayer\")\n self.assertTrue(self.eventLog)\n self.reactor.advance(60)",
"def handle_nak(self):",
"def run(self):\n inputs = [self.sock]\n outputs = [self.sock]\n while inputs:\n try:\n read, write, exceptional = select.select(inputs, outputs, inputs)\n # if server unexpectedly quits, this will raise ValueError exception (file descriptor < 0)\n except ValueError:\n print('Server error')\n GUI.display_alert('Server error has occurred. Exit app')\n self.sock.close()\n break\n\n if self.sock in read:\n with self.lock:\n try:\n data = \"\".encode(ENCODING)\n dataString = data.decode(ENCODING)\n while(not \"EOF\" in dataString):\n data += self.sock.recv(3965758)\n dataString = data.decode(ENCODING) \n # print('lsa')\n # print(\"wesel\")\n except socket.error:\n print(\"Socket error\")\n GUI.display_alert('Socket error has occurred. Exit app')\n self.sock.close()\n break\n\n self.process_received_data(data)\n\n if self.sock in write:\n if not self.queue.empty():\n data = self.queue.get()\n self.send_message(data)\n self.queue.task_done()\n else:\n time.sleep(0.05)\n\n if self.sock in exceptional:\n print('Server error')\n GUI.display_alert('Server error has occurred. Exit app')\n self.sock.close()\n break",
"def monitor_cb(self, events) : \n if not self.FremenClient.wait_for_server(timeout = rospy.Duration(1)):\n rospy.logerr(\"NO FREMEN SERVER FOUND. Fremenserver restart might be required\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
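One way the `raise_timeout` handler above could be wired in, sketched under the assumption that the caller polls a REQ socket with a deadline; the two-second timeout and the helper name `request_with_timeout` are illustrative only:

```python
import zmq

TIMEOUT_MS = 2000  # illustrative deadline, not taken from the original

def request_with_timeout(socket, payload):
    socket.send(payload)
    # poll() returns 0 when no reply arrives before the deadline expires
    if socket.poll(TIMEOUT_MS, zmq.POLLIN) == 0:
        raise_timeout()        # raises ZMQNotResponding, as defined above
    return socket.recv()
```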
This function creates a draft with the given email data; the user id should be either 'me', 'users/email.com', or 'users/{AAD_userId}'. | def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None):
data = {}
data['Subject'] = subject
data['Body'] = {}
data['Body']['ContentType'] = 'HTML'
data['Body']['Content'] = body
data['ToRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in addresses]
data['ccRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in cc_addresses]
if attachments_list is not None:
data['Attachments'] = attachments_list
params = json.dumps(data).encode('utf8')
url = "{api_url}/{user_id}/messages".format(api_url=API_URL, user_id=user_id)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(auth.access_token)
}
req = urllib.request.Request(url, params, headers)
try:
resp = urllib.request.urlopen(req)
resp_data = json.load(resp)
logging.getLogger(__name__).info("Draft created")
return resp_data['id']
except urllib.error.HTTPError as err:
raise AzureError(err) | [
"def create_draft(self, subject=\"\", to=\"\", cc=\"\", bcc=\"\", content=\"\", draft_folder=None):\n \n new_message = self._create_message_wrapper(subject, to, cc, bcc, content)\n \n if not self.is_simulate:\n try:\n if draft_folder is not None:\n self._imap_client.append(draft_folder, str(new_message))\n elif self._imap_account.is_gmail:\n self._imap_client.append('[Gmail]/Drafts', str(new_message))\n else:\n import imapclient\n drafts = self._imap_client.find_special_folder(imapclient.DRAFTS)\n if drafts is not None:\n self._imap_client.append(drafts, str(new_message))\n except IMAPClient.Error, e:\n logger.critical('create_draft() failed')\n return \n\n logger.debug(\"create_draft(): Your draft %s has been created\" % subject)",
"def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('account@eecis.udel.edu', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)",
"def test_create_draft(app, service, identity_simple, input_data):\n draft = service.create(identity_simple, input_data)\n draft_dict = draft.to_dict()\n\n assert draft.id\n\n for key, value in input_data.items():\n assert draft[key] == value\n\n # Check for pid and parent pid\n assert draft['id']\n assert draft['parent']['id']\n assert draft['is_published'] is False\n assert draft['versions']['is_latest_draft'] is True\n assert draft['versions']['is_latest'] is False\n assert 'errors' not in draft_dict",
"def create_draft(convo_ID, template_ID):\n # Get response template through helper function.\n # Make an API request to reply to a conversation with the content in that template\n response_template = get_canned_response(template_ID)\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/drafts\"\n payload = {\n \"body\": response_template[\"body\"],\n \"subject\": response_template[\"subject\"],\n \"author_id\": \"tea_188ud\", # [needs to change later on]\n \"channel_id\": \"cha_14tfp\", # [also will need to be changed for team based settings]\n }\n files = []\n headers = {\"Authorization\": BEARER_TOKEN}\n requests.request(\"POST\", url, headers=headers, json=payload, files=files)",
"async def createdm(self, ctx, user: discord.User):\n try:\n dm_channel = await ex.get_dm_channel(user=user)\n if dm_channel:\n user = await ex.get_user(user.id)\n user.mod_mail_channel_id = ctx.channel.id\n ex.cache.mod_mail[user.id] = ctx.channel.id # full list\n await ex.conn.execute(\"INSERT INTO general.modmail(userid, channelid) VALUES ($1, $2)\", user.id, ctx.channel.id)\n await dm_channel.send(f\"> {ctx.author.display_name} ({ctx.author.id}) has created a DM with you. All messages sent here will be sent to them.\")\n await ctx.send(f\"> A DM has been created with {user.id}. All messages you type in this channel will be sent to the user.\")\n else:\n await ctx.send(\"> I was not able to create a DM with that user.\")\n except Exception as e:\n await ctx.send(f\"ERROR - {e}\")\n log.console(e)",
"def create(self, tdata, request, response, **args):\n\n assert 'agree' in request.form\n assert not request.form.get('phone') # check invisible field for spam\n\n args = dfilter(request.form, ['name', 'password', 'email',\n 'fullname', 'gender', 'thumb', 'thumb_file_id'])\n args.update({\n 'sites' : [args['name'].lower() + '.' + config.server_name]\n ,'email' : args.get('email').lower()\n #,'flags' : { 'add_invites_on_save' : True }\n })\n if not args.get('fullname'): args['fullname'] = args['name']\n\n referral = self._check_referral(request)\n if (not referral and not self.flags.get('open_signup')):\n return self.serve_json(response, { 'error': 'referral' })\n if referral:\n referrer = self.db.User.fetch(referral['user'])\n else:\n if self.flags.get('open_signup'):\n referrer = self.db.User.site_user\n else:\n assert referrer, 'Referring user not found'\n args['referrer'] = referrer.id\n\n credential_id = request.form.get('credential_id')\n if credential_id:\n credentials = self.db.Temp.fetch(credential_id)\n request.requester.fb_client.credentials = credentials\n fb_profile = request.requester.fb_client.me()\n args.update({\n 'oauth': {'facebook': credentials}\n ,'facebook' : fb_profile\n })\n if request.form.get('age'):\n args.update({'birth_year':\n datetime.now().year - int(request.form.get('age'))})\n\n try:\n user = self.db.User.create(args)\n except Exception, e:\n return self.serve_json(response, { 'error':\n 'username exists or invalid username' })\n\n email_lists = map(lambda email_list: {\n 'name': email_list.name\n }, mail.MetaMailer.unsubscribable('user'))\n subscribed = []\n for email_list in email_lists:\n subscribed.append(email_list['name'])\n email_list['subscribed'] = True\n update = {}\n update['email_subscriptions'] = subscribed\n user.update(**update)\n\n # TODO: offer suggested users to follow.\n # new user follows NewHive\n self.db.Star.create(user, self.db.User.site_user)\n # self._friends_to_listen(request, user)\n # self._friends_not_to_listen(request, user)\n\n if user.get('referrer') != self.db.User.site_user.id:\n self.db.FriendJoined.create(user, referrer)\n # new user follows referrer\n self.db.Star.create(user, referrer)\n \n if referral:\n if referral.get('reuse'):\n referral.increment({'reuse': -1})\n referral.update_cmd({'$push': {'users_created': user.id}})\n if referral['reuse'] <= 0: referral.update(used=True)\n else:\n referral.update(\n used=True,\n user_created=user.id,\n user_created_name=user['name'],\n user_created_date=user['created']\n )\n contact = self.db.Contact.find({'referral_id': referral.id})\n if contact: contact.update(user_created=user.id)\n\n #user.give_invites(config.initial_invite_count)\n if args.has_key('thumb_file_id'):\n file = self.db.File.fetch(args.get('thumb_file_id'))\n if file:\n file.update(owner=user.id)\n\n try: mail.Welcome(db = self.db, jinja_env=self.jinja_env).send(user)\n except: \n # log_error(db, request=request, message=\"unable to welcome send email for {}\".format(user.get('email')))\n pass\n\n request.form = dict(username = args['name'], secret = args['password'])\n self.login(tdata, request, response)\n return self.redirect(response, '/' + user['name'] + '/profile')",
"def create_data(self, email='teest@test.com', name='Testing', phone_number='+917452369841', password='Test@123', address='100,100 , velachery, Chennai-42',\n is_volunteer=False, is_customer=False, is_active=True):\n return Volunteer.objects.create_user(name=name, phone_number=phone_number, password=password,\n address=address, email=email, is_volunteer=is_volunteer, is_customer=is_customer, is_active=True)",
"def addCommentToEmail(self, id, body, owner_user_id):\n data = dict(\n BODY = body,\n OWNER_USER_ID = owner_user_id,\n )\n urldata = json.dumps(data)\n text = self.generateRequest('/v2.1/Emails/' + str(id) + '/Comments', 'POST', urldata)\n return json.loads(text)",
"async def createdm(self, ctx, user: discord.User):\n try:\n dm_channel = await ex.get_dm_channel(user=user)\n if dm_channel is not None:\n ex.cache.mod_mail[user.id] = ctx.channel.id\n await ex.conn.execute(\"INSERT INTO general.modmail(userid, channelid) VALUES ($1, $2)\", user.id, ctx.channel.id)\n await dm_channel.send(f\"> {ctx.author.display_name} ({ctx.author.id}) has created a DM with you. All messages sent here will be sent to them.\")\n await ctx.send(f\"> A DM has been created with {user.id}. All messages you type in this channel will be sent to the user.\")\n else:\n await ctx.send(\"> I was not able to create a DM with that user.\")\n except Exception as e:\n await ctx.send(f\"ERROR - {e}\")\n log.console(e)",
"def create_associated_email(sender, **kwargs):\n user = kwargs['instance']\n if kwargs['created']:\n email = AssociatedEmail(user=user, email=user.email, is_primary_email=True)\n if user.is_active:\n email.verification_date = timezone.now()\n email.is_verified = True\n email.save()",
"def test_update_user_endpoint_new_email(self):\n print(\"Generate a new email and check if email is not allocated\")\n email_id = Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"])\n kwargs = {'email_id': email_id, 'return_response_obj': True,\n 'url': self.test_args[\"relative_url_check_email\"]}\n response = self.test_check_email_endpoint(**kwargs)\n assert json.loads(response.text)[\"data\"][\"available\"] is True, \"Unable to generate a new email id\"\n\n print(\"Update email id\")\n response = self.test_update_user_endpoint(**kwargs)\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"",
"def _FormatConversationEmail(cls, client, recipient_id, viewpoint, activity):\r\n from viewfinder.backend.db.identity import Identity\r\n from viewfinder.backend.db.photo import Photo\r\n from viewfinder.backend.db.user import User\r\n\r\n # Get email address of recipient.\r\n recipient_user = yield gen.Task(User.Query, client, recipient_id, None)\r\n if recipient_user.email is None:\r\n # No email address associated with user, so can't send email.\r\n raise gen.Return(None)\r\n\r\n identity_key = 'Email:%s' % recipient_user.email\r\n\r\n # Create ShortURL that sets prospective user cookie and then redirects to the conversation.\r\n viewpoint_url = yield AlertManager._CreateViewpointURL(client, recipient_user, identity_key, viewpoint)\r\n\r\n sharer = yield gen.Task(User.Query, client, activity.user_id, None)\r\n sharer_name = AlertManager._GetNameFromUser(sharer, prefer_given_name=False)\r\n\r\n # Create the cover photo ShortURL by appending a \"next\" query parameter to the viewpoint ShortURL.\r\n cover_photo_url = None\r\n cover_photo_height = None\r\n cover_photo_width = None\r\n if viewpoint.cover_photo != None:\r\n next_url = '/episodes/%s/photos/%s.f' % (viewpoint.cover_photo['episode_id'], viewpoint.cover_photo['photo_id'])\r\n cover_photo_url = \"%s?%s\" % (viewpoint_url, urlencode(dict(next=next_url)))\r\n\r\n photo = yield gen.Task(Photo.Query, client, viewpoint.cover_photo['photo_id'], None)\r\n\r\n if photo.aspect_ratio < 1:\r\n cover_photo_height = AlertManager._MAX_COVER_PHOTO_DIM\r\n cover_photo_width = int(AlertManager._MAX_COVER_PHOTO_DIM * photo.aspect_ratio)\r\n else:\r\n cover_photo_width = AlertManager._MAX_COVER_PHOTO_DIM\r\n cover_photo_height = int(AlertManager._MAX_COVER_PHOTO_DIM / photo.aspect_ratio)\r\n\r\n email_args = {'from': EmailManager.Instance().GetInfoAddress(),\r\n 'to': recipient_user.email,\r\n 'subject': '%s added you to a conversation' % sharer_name}\r\n util.SetIfNotEmpty(email_args, 'toname', recipient_user.name)\r\n if sharer_name:\r\n email_args['fromname'] = '%s via Viewfinder' % sharer_name\r\n\r\n # Create the unsubscribe URL.\r\n unsubscribe_cookie = User.CreateUnsubscribeCookie(recipient_id, AccountSettings.EMAIL_ALERTS)\r\n unsubscribe_url = 'https://%s/unsubscribe?%s' % (options.options.domain,\r\n urlencode(dict(cookie=unsubscribe_cookie)))\r\n\r\n # Set viewpoint title.\r\n viewpoint_title = viewpoint.title if viewpoint is not None else None\r\n\r\n fmt_args = {'cover_photo_url': cover_photo_url,\r\n 'cover_photo_height': cover_photo_height,\r\n 'cover_photo_width': cover_photo_width,\r\n 'viewpoint_url': viewpoint_url,\r\n 'unsubscribe_url': unsubscribe_url,\r\n 'sharer_name': sharer_name,\r\n 'viewpoint_title': viewpoint_title,\r\n 'toname': recipient_user.name}\r\n\r\n resources_mgr = ResourcesManager.Instance()\r\n\r\n email_args['html'] = escape.squeeze(resources_mgr.GenerateTemplate('alert_conv_base.email',\r\n is_html=True,\r\n **fmt_args))\r\n email_args['text'] = resources_mgr.GenerateTemplate('alert_conv_base.email',\r\n is_html=False,\r\n **fmt_args)\r\n\r\n raise gen.Return(email_args)",
"def create_user_questionnaire_in_progress(self):\n username = 'pseudo'\n email = 'martine@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=2, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n list_advice_id = [1, 5, 10]\n self.add_advice_to_user_created(user_created, list_advice_id)\n\n return user_created",
"def send_mail_to_draft_action():\n def send_mail_to_draft(modeladmin, request, queryset):\n \"\"\" send mails to draft folder action \"\"\"\n emails = list()\n for mail in queryset:\n for receiver in mail.receivers.all():\n email = generate_mail(mail, receiver)\n emails.append(email)\n\n send_mail_to_folder('Drafts', emails)\n\n send_mail_to_draft.short_description = \"send emails to draft\"\n\n return send_mail_to_draft",
"def _post(self, object='emailTemplate', path=None, params=None):\n if params is None:\n params = {}\n result = self.client.post(object=object, path=path, params=params)\n return result",
"def create_or_modify_mailing_campaign(request, mailing_id=None):\n success = False\n data_obj = {}\n errors = []\n try:\n if request.method == 'POST':\n json_obj = json.loads(request.body)\n campaign_name = json_obj.get('campaign_name', None)\n from_name = json_obj.get('from_name', None)\n reply_email = json_obj.get('reply_email', None)\n subject = json_obj.get('subject', None)\n link_redirect_to = json_obj.get('link_redirect_to', None)\n title = json_obj.get('title', None)\n subtitle = json_obj.get('subtitle', None)\n body_section1 = json_obj.get('body_section1', None)\n body_section2 = json_obj.get('body_section2', None)\n type = json_obj.get('type', None)\n template_id = json_obj.get('template_id', None)\n db_ids = json_obj.get('db_ids', None)\n\n current_template = ModuleTemplate.objects.get(id=template_id)\n\n if mailing_id:\n mailing = ModuleCampaign.objects.get(id=mailing_id, owner__user=request.user)\n mailing.campaign_name = campaign_name\n mailing.from_name = from_name\n mailing.reply_email = reply_email\n mailing.subject = subject\n mailing.link_redirect_to = link_redirect_to\n mailing.title = title\n mailing.subtitle = subtitle\n mailing.body_section1 = body_section1\n mailing.body_section2 = body_section2\n mailing.template_campaign = current_template\n\n mailing, recipients = set_and_get_contacts_list(mailing, db_ids)\n mailing.save()\n data_obj = mailing.as_dict\n else:\n current_user = Client.objects.get(user=request.user)\n mailing = ModuleCampaign.objects.create(\n campaign_name=campaign_name,\n from_name=from_name,\n reply_email=reply_email,\n owner=current_user,\n template_campaign=current_template,\n subject=subject,\n link_redirect_to=link_redirect_to,\n title=title,\n status=1,\n subtitle=subtitle,\n body_section1=body_section1,\n body_section2=body_section2\n\n )\n mailing, recipients = set_and_get_contacts_list(mailing, db_ids)\n mailing.save()\n data_obj = mailing.as_dict\n # send campaign\n if type == 2:\n send_mail_via_mandrill(None, None, mailing.subject, \"test_practicas\",\n get_template_context_for_mailing(mailing), recipients,\n mailing.reply_email, mailing.from_name, mailing.id)\n mailing.date_send = datetime.datetime.now()\n mailing.save()\n success = True\n except Exception as e:\n errors = e.args\n\n data = {'success': success, 'errors': errors, 'data': data_obj}\n return json_response(data)",
"def create_message(self,recipient=None):",
"def create_user_emails_sheets_subscribers():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n users = list(User.objects.filter(is_active=True,\n role=\"BU\").values('email', 'profile_id'))\n\n # Check their consent status and update accordingly\n subscribers = []\n for user in users:\n if user['profile_id'] != None:\n profile = SubscriberProfile.objects.get(id=user['profile_id'])\n status = profile.consent_status\n if status == \"IMPLIED\" and profile.expired_at < date.today():\n profile.consent_status = \"EXPIRED\"\n profile.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n user.pop('profile_id')\n user.update({\"first_name\": profile.first_name,\n \"last_name\": profile.last_name, \"consent_status\": profile.consent_status})\n subscribers.append(user)\n\n # Get newsletter only users' email\n nlusers = list(NLUser.objects.all())\n\n # Check their consent status and update accordingly\n for nluser in nlusers:\n status = nluser.consent_status\n if status == \"IMPLIED\" and nluser.expired_at < date.today():\n nluser.consent_status = \"EXPIRED\"\n nluser.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n subscribers.append({\"email\": nluser.email, \"first_name\": nluser.first_name,\n \"last_name\": nluser.last_name, \"consent_status\": nluser.consent_status})\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'First name', 'Last name', 'Consent Status']]\n for subscriber in subscribers:\n values.append(list(subscriber.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 4\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 4\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error",
"def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
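An illustrative call of `create_draft` as defined above; the `auth` object, addresses, and subject are placeholders rather than values from the original:

```python
# Returns the id of the draft created in the target user's mailbox.
draft_id = create_draft(
    auth,
    subject="Quarterly report",
    body="<p>Draft body goes here.</p>",
    addresses=["alice@example.com"],
    user_id="me",
    cc_addresses=["bob@example.com"],
)
```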
Iterator that goes through all the pages to find all the emails | def get_all_emails_it(auth, user_id, folder_id='AllItems', pages_limit=None, pages_size=50, **kwargs):
i = 0
args_dict = dict(kwargs, top=pages_size, skip=pages_size * i)
curr_emails = get_emails(auth, user_id, folder_id, **args_dict)
while len(curr_emails) != 0:
yield curr_emails
if pages_limit is not None and i >= pages_limit:
break
i += 1
args_dict = dict(kwargs, top=pages_size, skip=pages_size * i)
curr_emails = get_emails(auth, user_id, folder_id, **args_dict) | [
"def test_get_inbox_emails_paginated(self):\n pass",
"def get_email_addresses(startdate, enddate, user, password):\n emails = []\n page = 1\n more_pages = True\n\n while more_pages:\n response = requests.get(\n 'https://restapi.surveygizmo.com/v2/survey/{survey}'\n '/surveyresponse?'\n 'filter[field][0]=datesubmitted'\n '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0'\n '&filter[operator][1]=<&filter[value][1]={end}+0:0:0'\n '&filter[field][1]=status&filter[operator][1]=='\n '&filter[value][1]=Complete'\n '&resultsperpage=500'\n '&page={page}'\n '&user:pass={user}:{password}'.format(\n survey=EMAIL_COLLECTION_SURVEY_ID, start=startdate,\n end=enddate, page=page, user=user, password=password))\n\n results = json.loads(response.content)\n total_pages = results['total_pages']\n more_pages = page < total_pages\n emails = emails + [r['[question(13)]'] for r in results['data']]\n\n return emails",
"def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url",
"def extract_emails_from_category(initial_url, first_page=int(1)):\r\n\tresult_emails = set() #we will return this\r\n\t#last page regex\r\n\tlp_regex = re.compile('[0-9]+/;')\r\n\t#Open URL\r\n\tsoup = bs4.BeautifulSoup(urlopen(initial_url), \"html5lib\")\r\n\t#extract the link to the last page. It is inside div.paging-bottom > ul > li with text \">>\"\r\n\tnavigation = soup.find_all(\"div\",id=\"paging-bottom\")\r\n\tif not navigation:\r\n\t\tprint(\"This page is weird. It has no navigation. Aborting\\n\")\r\n\t\treturn result_emails\r\n\r\n\ttxt_elem = navigation[0].ul.find_all(text=\">>\")[0]\r\n\t#link to last page\r\n\tlink = txt_elem.parent\r\n\t#Get its url.. smthg like /ourivesarias-joalharias/134/;jsessionid=67E1932531B84B3E77AAF47A29B263CE\r\n\turl = link['href']\r\n\t#Pick the number of the last page\r\n\tmatch = lp_regex.search(url)\r\n\tif match:\r\n\t\tlast_page = match.group()[0:-2]\r\n\t\tlast_page_i = int(last_page)\r\n\telse:\r\n\t\tprint(\"This category has no navigation to the last page\\n\")\r\n\t\tlast_page_i = first_page\r\n\t\t\r\n\t#Sanity Check\r\n\tif last_page_i < first_page:\r\n\t\tlast_page_i = first_page\r\n\t\t\r\n\tprint(\"Working on category %s\" % initial_url)\r\n\t#Now that we have the last page. Time to iterate on each one and get the emails\r\n\tfor page in xrange( first_page, last_page_i ):\r\n\t\tpage_url = initial_url + str(page) + '/' #This is fragile\r\n\t\tprint(\"Scanning page %d of %d (%s).\" % (page, last_page_i, page_url))\r\n\t\ttry:\r\n\t\t\temails = extract_emails_from_page(bs4.BeautifulSoup( unicode(urlopen(page_url).read(),'utf-8','ignore'), \"html5lib\"))\r\n\t\t\twrite_emails_to_set(emails, result_emails)\r\n\t\t\ttime.sleep(5)\r\n\t\texcept IOError:\r\n\t\t\tprint(\"Coult not fetch url %s. Skipped\\n\" % page_url)\r\n\treturn result_emails",
"def extract_emails_from_page(soup):\r\n\temail_pattern = re.compile('([\\w\\-\\.+]+@(\\w[\\w\\-]+\\.)+[\\w\\-]+)')\r\n\ttry:\r\n\t\tpage_content = str(soup)\r\n\texcept:\r\n\t\tprint('Error parsing page. Skipped\\n')\r\n\t\treturn []\r\n\tmatches = email_pattern.findall(page_content)\r\n\tif matches:\r\n\t\treturn [ match[0] for match in matches ]\r\n\treturn []",
"def __iter__(self):\n for page_code in self.page_codes:\n yield self.page(page_code)",
"def scrape_emails(webpage):\n emails = []\n html = requests.get(webpage)\n email_regex = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z]+')\n emails = email_regex.findall(html.text)\n return emails",
"def test_get_emails(self):\n pass",
"def scrape_for_email(site):\n# ----opens html file to search for emails----\n with open('site_text.html', 'r') as site_file:\n get_website = site_file.read()\n\n# ----EMAIL SEARCH----\n email_pattern = r'''(?:[a-z0-9!#$%&'*+\\/=?^_`{|}~-]+(?:\\.\n [a-z0-9!#$%&'*+\\/=?^_`{|}~-]+)*|\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\n \\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\")@\n (?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])\n ?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4]\n [0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\n \\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])'''\n email_regex = re.compile(email_pattern, re.VERBOSE)\n\n email_search = re.findall(email_regex, get_website)\n\n email_set = set()\n for email in email_search:\n email_set.add(email)\n\n counter2 = 1\n if len(email_set) > 0:\n print('----EMAIL----')\n print(f'Found {len(email_set)}')\n for email in email_set:\n jemail = ''.join(email)\n print(f'{counter2}) {jemail}')\n counter2 += 1\n else:\n print('[----No Emails were found!----]')",
"def get_emails(website, max_depth): \n D = download.Download() \n return D.get_emails(website, max_depth=max_depth)",
"def get_emails(self):\n email_ids = self.get_email_ids()\n Email = get_email_class()\n return [email for email in Email.objects.filter(pk__in=email_ids)]",
"def fetch_all(self):\n emails = []\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n try:\n res, data = self._mailconn.fetch(msg.decode('utf-8'), '(RFC822)')\n except Exception as error:\n self.close_mail_connection()\n print('No email to read: '+error)\n exit()\n \n msg = email.message_from_string((data[0][1]).decode('utf-8'))\n if not isinstance(msg, str):\n if self.is_sender_in_whitelist(msg['From']):\n emails.append(msg)\n\n return emails",
"def scan_strings(self):\n for page in self:\n for string in page.strings:\n yield page, string",
"def get_emails(self, page=1, per_page=10):\n\n client = Client({\n 'method': Client.METHOD_GET,\n 'url': self.config.get_api_url('transactional-emails'),\n 'params_get': {\n 'page': page,\n 'per_page': per_page\n }\n })\n\n return client.request()",
"def read_email(con):\n collect = []\n con.select()\n\n type, data = con.search(None, 'ALL')\n mail_ids = data[0]\n\n id_list = mail_ids.split() \n\n for i in id_list:\n typ, data = con.fetch(str(int(i)), '(RFC822)' )\n \n try:\n for response_part in data:\n if isinstance(response_part, tuple):\n sys.stdout.write(\"\\r{}\".format(i))\n sys.stdout.flush()\n \n msg = email.message_from_string(response_part[1].decode('utf-8'))\n email_subject = msg['subject']\n email_from = msg['from']\n collect.append([i, email_subject, email_from])\n except:\n print(\"Error\")\n pass\n return(collect)",
"def iterate_emails():\n for eml in EmailObj.objects.all():\n opt = input(\"quit? y/n: \")\n if opt in (\"y\", \"Y\"): break\n else:\n print(\"-------------\")\n print(\"uidl: \", eml.uidl)\n print(\"subject: \", eml.subject)\n print(\"to: \", eml.recipient)\n print(\"from: \", eml.sender)\n print(\"date: \", eml.date)",
"def itermwpages():\n query = {\"format\": \"xml\",\n \"action\": \"query\",\n \"list\": \"allpages\",\n \"aplimit\": 100}\n while True:\n resp = requests.get(API_URL, params=query)\n root = etree.fromstring(resp.content)\n for p in root.iterfind(\"query/allpages/p\"):\n yield p.get(\"title\")\n cont = root.find(\"query-continue/allpages\")\n if cont is not None:\n query[\"apcontinue\"] = cont.get(\"apcontinue\")\n else:\n break",
"def email_addresses(self):\n for item in self._prop_list:\n yield EmailAddress(item)",
"def _fetch_emails_by_uids(self, uids, limit=FETCH_LIMIT, **kwargs):\n limit = limit or -1\n for uid in uids:\n msg = self.fetch_email_by_uid(uid, **kwargs)\n msg.uid = uid\n yield msg\n limit -= 1\n if limit == 0:\n break"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
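A hedged usage sketch of the paging iterator above: collect subjects across at most three pages of 25 messages each. The `'Subject'` key assumes the Outlook REST message shape used by the surrounding snippets:

```python
subjects = []
for page in get_all_emails_it(auth, 'me', pages_limit=3, pages_size=25):
    # each page is the list returned by get_emails for one skip/top window
    subjects.extend(msg.get('Subject') for msg in page)
```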
Calculate the masked ratio. | def get_masked_ratio(mask):
hist = mask.histogram()
return hist[0] / np.prod(mask.size) | [
"def maskedFraction(self):\n\n\t\tif not self._masked:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn self._masked_fraction",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def mask_percent(img):\n if (len(img.shape) == 3) and (img.shape[2] == 3):\n img = np.sum(img, axis=-1)\n\n mask_percentage = 100 * (1 - np.count_nonzero(img) / img.size )\n return mask_percentage",
"def fill_ratio(self) -> fractions.Fraction:\n n_true = sum(intent.count() for intent in self._intents)\n return fractions.Fraction(n_true, self.shape.size)",
"def ratio_normal_conductor(self):\n return 1.0 - self.ratio_superconductor()",
"def _overflow_rate(mask):\n return util.sum(util.cast(mask, int)) / util.count(mask)",
"def mask_percent(np_img):\n if (len(np_img.shape) == 3) and (np_img.shape[2] == 3):\n np_sum = np_img[:, :, 0] + np_img[:, :, 1] + np_img[:, :, 2]\n mask_percentage = 100 - np.count_nonzero(np_sum) / np_sum.size * 100\n else:\n mask_percentage = 100 - np.count_nonzero(np_img) / np_img.size * 100\n return mask_percentage",
"def mask_density(mask):\n return get_number_of_unpruned_weights(mask).float() / get_number_of_weights(mask).float()",
"def get_signal2noise_ratio(self) -> float:",
"def _get_area_ratio(self, mask, target_class):\n class_pixels = mask[np.where(mask == target_class)].size\n\n return class_pixels / mask.size",
"def mask_percentage(self):\n return 100 - self.tissue_percentage",
"def trueMaskPercentInTile(mask, tileSize, centerLocation):\n currMask = returnTile(mask, tileSize, centerLocation)\n return currMask.sum()/(tileSize*tileSize*1.0)",
"def GetRatio(self):\n ...",
"def mask_rate(rate, error, maxsig):\n # initialise mask array with existing NaNs\n mask = ~isnan(error)\n # original Nan count\n orig = np.count_nonzero(mask)\n # determine where error is larger than the maximum sigma threshold\n mask[mask] &= error[mask] > maxsig\n # replace values with NaNs\n rate[mask] = nan\n error[mask] = nan\n # calculate percentage of masked pixels\n nummasked = int(np.count_nonzero(mask)/orig*100)\n log.info('Percentage of pixels masked = {}%'.format(nummasked))\n\n return rate, error",
"def ratio(self):\n return self._ratio",
"def _ratio(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = sim / ref\n out.attrs[\"units\"] = \"\"\n return out",
"def denominator(self):\n return 1",
"def golden_ratio():\n\n return ratio(1)",
"def fraction_filled(self):\n return float(self.daisychain.num_cables) / float(self.max_cables)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
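The masked-ratio computation above divides the count of zero-valued pixels (`hist[0]`) by the total pixel count; a small numeric check, assuming a Pillow mask image:

```python
import numpy as np
from PIL import Image

arr = np.zeros((4, 4), dtype=np.uint8)
arr[0, :] = 255                      # one non-zero row, 12 zero pixels remain
mask = Image.fromarray(arr, mode="L")

hist = mask.histogram()              # hist[0] counts the zero-valued pixels
print(hist[0] / np.prod(mask.size))  # 12 / 16 = 0.75
```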
Create a dictionary with domain architectures exclusive in a single pathogen type group. | def generateArchitectureDataStructure(db, collapse_pathogen_groups=False):
# Calculate total numbers of species and strains for each pathogen group
counts_species_pathogen_dict = defaultdict(lambda: defaultdict(int))
for row in db.getNumSpeciesPathogen():
counts_species_pathogen_dict[row['pathogen_type']]['num_species'] = row['num_species']
counts_species_pathogen_dict[row['pathogen_type']]['num_strains'] = row['num_strains']
architecture_pathogen_dict = defaultdict(list)
arch_strains_species_dict = defaultdict(lambda: defaultdict(list))
for row in db.getArchitecturePathogenTypeIterator():
strains = row['species']
species = str(strains).split(' (')[0]
pathogen_type = row['pathogen_type']
architecture_id = row['architecture']
architecture_acc = row['architecture_acc']
architecture_pathogen_dict[(architecture_id, architecture_acc)].append(pathogen_type)
arch_strains_species_dict[(architecture_id, architecture_acc)]['species'].append(species)
arch_strains_species_dict[(architecture_id, architecture_acc)]['strains'].append(strains)
for architecture in architecture_pathogen_dict.keys():
# If an architecture is only present in proteins of a certain pathogen_type,
# it should have only 1 pathogen_type
pathogen_groups_set = set(architecture_pathogen_dict[architecture])
if not exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):
architecture_pathogen_dict.pop(architecture)
arch_strains_species_dict.pop(architecture)
else:
# Check if the architecture is present in all species and strains
species_set = set(arch_strains_species_dict[architecture]['species'])
strains_set = set(arch_strains_species_dict[architecture]['strains'])
total_num_species, total_num_strains = get_number_ssp_stt_members(counts_species_pathogen_dict,
pathogen_groups_set,
collapse_pathogen_groups)
arch_strains_species_dict[architecture]['total_num_species'] = total_num_species
arch_strains_species_dict[architecture]['total_num_strains'] = total_num_strains
if total_num_species == len(species_set):
arch_strains_species_dict[architecture]['all_species']
if total_num_strains == len(strains_set):
arch_strains_species_dict[architecture]['all_strains']
return architecture_pathogen_dict, arch_strains_species_dict | [
"def get_architectures() -> dict:\n archs = {}\n for arch in list(Architecture):\n archs[arch.name] = arch\n\n return archs",
"def for_sim_type(sim_type):\n if sim_type not in cfg:\n return {}\n return pkcollections.map_to_dict(cfg[sim_type])",
"def formDomain(self):\r\n domains = {}\r\n for x in range(0, 9):\r\n for y in range(0, 9):\r\n if (x, y) not in self.assigned:\r\n domains[(x, y)] = []\r\n for value in range(1, 10):\r\n domains[(x, y)].append(value)\r\n return domains",
"def environments_of(groups):\n types = {}\n for group in groups:\n for env in group.environments:\n et = env.environmentType\n envs = types.setdefault((et.id, et.name), set())\n envs.add((env.id, env.name))\n return types",
"def _dependencies_dict(self, deptype=\"all\"):\n _sort_fn = lambda x: (x.spec.name,) + _sort_by_dep_types(x)\n _group_fn = lambda x: x.spec.name\n deptype = dp.canonical_deptype(deptype)\n selected_edges = self._dependencies.select(deptypes=deptype)\n result = {}\n for key, group in itertools.groupby(sorted(selected_edges, key=_sort_fn), key=_group_fn):\n result[key] = list(group)\n return result",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"entity_major\": [\n self.from_entity(entity=\"entity_major\", intent=[\"intent_major_info\", \"inform\"])],\n }",
"def _produce_sets(config: FrozenBox) -> Dict[str, PersonSet]:\n person_set_configs = [*config.domain]\n result = {}\n for person_set in person_set_configs:\n result[person_set.id] = PersonSet(\n person_set.id,\n person_set.name,\n person_set[\"values\"],\n person_set.color,\n {},\n )\n return result",
"def make_isa_encode_group(self, group_index, ins_group):\n if vencode():\n msgb(\"ENCODING GROUP\", \" %s -- %s\" % (group_index, ins_group))\n fname = \"xed_encode_group_%d\" % (group_index)\n fo = function_object_t(fname,'xed_bool_t')\n fo.add_arg(\"%s* xes\" % xed_encoder_request)\n fo.add_code_eol( \"xed_bool_t okay=1\")\n fo.add_code_eol( \"xed_ptrn_func_ptr_t fb_ptrn_function\" )\n\n #import pdb; pdb.set_trace()\n\n ins_group.sort() # call before emitting group code\n\n # iform initialization table\n iform_ids_table = ins_group.gen_iform_ids_table() # table initialization data\n iclasses_number = len(ins_group.get_iclasses())\n iforms_number = len(ins_group.iforms)\n table_type = 'static const xed_uint16_t '\n table_decl = 'iform_ids[%d][%d] = {' % (iclasses_number,\n iforms_number)\n fo.add_code(table_type + table_decl)\n for line in iform_ids_table:\n fo.add_code(line)\n fo.add_code_eol('}')\n\n # isa-set initialization table set 1/0 values to help limit\n # encode to producing the isa-sets present on the specified\n # self.chip. The all_ones and all_zeros are Very frequently\n # useful optimizations to reduce code size and speed up\n # checking.\n isa_set_table, all_ones, all_zeros = ins_group.gen_iform_isa_set_table( self.isa_set_db[self.chip] )\n if all_ones==False and all_zeros==False:\n table_type = 'static const xed_bool_t '\n table_decl = 'isa_set[{}][{}] = {{'.format(iclasses_number, iforms_number)\n fo.add_code(table_type + table_decl)\n for line in isa_set_table:\n fo.add_code(line)\n fo.add_code_eol('}')\n \n get_iclass_index = 'xed_encoder_get_iclasses_index_in_group'\n obj_name = encutil.enc_strings['obj_str']\n code = 'xed_uint8_t iclass_index = %s(%s)' % (get_iclass_index,obj_name)\n fo.add_code_eol(code)\n pad4 = ' '*4\n pad8 = ' '*8\n\n for i,iform in enumerate(ins_group.iforms):\n # FIXME:2007-07-05 emit the iform.operand_order check of\n # the xed_encode_order[][] array\n\n # emit code that checks the operand order\n \n # made special operand orders for 0 1 and 2\n # operands. store the dictionary of operand orders,\n # look up the list. If there are zero entries, no\n # memcmp is needed. If there is one entry, replace the\n # memcmp with an equality check. If there are two\n # operands, replace the memcmp with two equality\n # tests. Otherwise use the memcmp.\n\n # FIXME 2007-09-11 use the static count of the values of\n # the number of operands rather than looking it up in\n # xed_encode_order_limit. Save many array derefs per\n # encode. 2014-04-15: xed_encode_order_limit[] does not\n # currently show up in the generated code so the above\n # fixme is moot.\n\n \n # This \"if\" is for encoder chip checking. if ENCODE_FORCE\n # is set, we let everything encode. 
Otherwise we use the\n # isa_set array set using the specified --encoder-chip at\n # comple time.\n if all_ones:\n fo.add_code('if (1) { // ALL ONES')\n elif all_zeros:\n fo.add_code('if (xed3_operand_get_encode_force(xes)) { // ALL ZEROS')\n else:\n fo.add_code('if (xed3_operand_get_encode_force(xes) || isa_set[iclass_index][{}]) {{ // MIXED'.format(i))\n \n try:\n operand_order = self.all_operand_name_list_dict[iform.operand_order_key]\n except:\n operand_order = None\n cond1 = None\n nopnd = None\n optimized = False\n if operand_order:\n nopnd = len(operand_order.lst)\n if 0:\n msge(\"OPNDORDER for group %d is (%d) %s \" % (\n group_index, \n nopnd, \n str(operand_order.lst)))\n cond1 = \"xes->_n_operand_order == %d\" % (nopnd)\n if nopnd==0:\n optimized = True\n fo.add_code(pad4 + \"if (%s) {\" % (cond1))\n elif nopnd ==1:\n optimized = True\n cond2 = \"xes->_operand_order[0] == XED_OPERAND_%s\"\n cond2 = cond2 % (operand_order.lst[0])\n fo.add_code(pad4 + \"if (%s && %s) {\" % (cond1,cond2))\n elif nopnd ==2:\n optimized = True\n cond2 = \"xes->_operand_order[0] == XED_OPERAND_%s\" \n cond2 = cond2 % (operand_order.lst[0])\n cond3 = \"xes->_operand_order[1] == XED_OPERAND_%s\"\n cond3 = cond3 % (operand_order.lst[1])\n fo.add_code(pad4 + \"if (%s && %s && %s) {\" % (cond1,cond2,cond3))\n\n memcmp_type = 'xed_uint8_t' \n if not optimized:\n if cond1 == None:\n cond1 = \"xed_encode_order_limit[%d]==xes->_n_operand_order\"\n cond1 = cond1 % (iform.operand_order)\n if nopnd == None:\n cond2 = (\"memcmp(xed_encode_order[%d], \" +\n \"xes->_operand_order, \" +\n \"sizeof(%s)*xed_encode_order_limit[%d])==0\")\n cond2 = cond2 % (iform.operand_order, memcmp_type, \n iform.operand_order)\n else:\n cond2 = (\"memcmp(xed_encode_order[%d], \" +\n \"xes->_operand_order, sizeof(%s)*%d)==0\")\n cond2 = cond2 % (iform.operand_order, memcmp_type, nopnd)\n\n fo.add_code(pad4 + \"if (%s && %s) {\" % (cond1, cond2))\n if viform():\n msgb(\"IFORM\", str(iform))\n\n \n # For binding, this emits code that sets\n # conditions_satisfied based on some long expression and\n # then tests it and sets some operand storage fields. For\n # emitting, it checks the iform and emits bits.\n captures = None\n lines = iform.rule.emit_isa_rule(i,ins_group)\n fo.add_code_eol(pad8 + \"xed_bool_t conditions_satisfied=0\" )\n for line in lines:\n fo.add_code(pad8 + line)\n \n fo.add_code(pad4 + '} // initial conditions')\n fo.add_code('} // xed_enc_chip_check ')\n \n fo.add_code_eol(\"(void) okay\")\n fo.add_code_eol(\"(void) xes\")\n fo.add_code_eol('return 0')\n return fo",
"def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }",
"def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANPR').get('abstractTypes')\n exolinks = globalMap.get('ANPR').get('exolinks')\n\n # DataType ScaleFunction\n currentMap = {}\n abstractTypes['ScaleFunction'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008'] = currentMap\n loadMaps['ANPR.ScaleFunction'] = currentMap\n currentMap['tag'] = 'ANPR.ScaleFunction'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnnealProtocol\n currentMap = {}\n abstractTypes['AnnealProtocol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00004'] = currentMap\n loadMaps['ANPR.AnnealProtocol'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'annealProtocols'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocol\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnnealProtocol.application\n currentMap = {}\n contentMap['application'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocol.application'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.application'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00003'\n currentMap['name'] = 'application'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnnealProtocol.applicationVersion\n currentMap = {}\n contentMap['applicationVersion'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00002'] = currentMap\n loadMaps['ANPR.AnnealProtocol.applicationVersion'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.applicationVersion'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00002'\n currentMap['name'] = 'applicationVersion'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00004'] = currentMap\n loadMaps['ANPR.AnnealProtocol.code'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00004'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocol.details'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.details'\n currentMap['type'] = 
'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00003'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute AnnealProtocol.methodStoreName\n currentMap = {}\n contentMap['methodStoreName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00001'] = currentMap\n loadMaps['ANPR.AnnealProtocol.methodStoreName'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.methodStoreName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-18-10:52:09_00001'\n currentMap['name'] = 'methodStoreName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealProtocol.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00002'] = currentMap\n loadMaps['ANPR.AnnealProtocol.name'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00002'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AnnealProtocol.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnnealProtocol.annealStages\n currentMap = {}\n contentMap['annealStages'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00002'] = currentMap\n loadMaps['ANPR.AnnealProtocol.annealStages'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.annealStages'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:33:00_00002'\n currentMap['name'] = 'annealStages'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n\n # Role AnnealProtocol.energyTerms\n currentMap = {}\n contentMap['energyTerms'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:08_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocol.energyTerms'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.energyTerms'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:08_00003'\n currentMap['name'] = 'energyTerms'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n\n # Role AnnealProtocol.software\n currentMap = {}\n contentMap['software'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-18-10:52:14_00001'] = currentMap\n loadMaps['ANPR.AnnealProtocol.software'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocol.software'\n currentMap['type'] = 'exolink'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-18-10:52:14_00001'\n currentMap['name'] = 'software'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['copyOverride'] = True\n currentMap['content'] = globalMap.get('METH').get('exolinks')\n # End of AnnealProtocol\n\n currentMap = abstractTypes.get('AnnealProtocol')\n aList = ['application', 'applicationVersion', 'code', 'details', 'methodStoreName', 'name']\n 
currentMap['simpleAttrs'] = aList\n aList = ['energyTerms', 'annealStages', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['annealStages', 'energyTerms']\n currentMap['children'] = aList\n\n # Class AnnealProtocolStore\n currentMap = {}\n abstractTypes['AnnealProtocolStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00003'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'annealProtocolStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocolStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnnealProtocolStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnnealProtocolStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnnealProtocolStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnnealProtocolStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnnealProtocolStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnnealProtocolStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00009'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore.name'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00009'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AnnealProtocolStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnnealProtocolStore.annealProtocols\n currentMap = {}\n contentMap['annealProtocols'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00004'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore.annealProtocols'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore.annealProtocols'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00004'\n currentMap['name'] = 'annealProtocols'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n\n # Role AnnealProtocolStore.refPotentialTerms\n currentMap = {}\n contentMap['refPotentialTerms'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00008'] = currentMap\n loadMaps['ANPR.AnnealProtocolStore.refPotentialTerms'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealProtocolStore.refPotentialTerms'\n 
currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00008'\n currentMap['name'] = 'refPotentialTerms'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n # End of AnnealProtocolStore\n\n currentMap = abstractTypes.get('AnnealProtocolStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['refPotentialTerms', 'annealProtocols', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['annealProtocols', 'refPotentialTerms']\n currentMap['children'] = aList\n\n # Class AnnealStage\n currentMap = {}\n abstractTypes['AnnealStage'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00005'] = currentMap\n loadMaps['ANPR.AnnealStage'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'annealStages'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealStage\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnnealStage.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnnealStage.finalTemp\n currentMap = {}\n contentMap['finalTemp'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00014'] = currentMap\n loadMaps['ANPR.AnnealStage.finalTemp'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.finalTemp'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00014'\n currentMap['name'] = 'finalTemp'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Attribute AnnealStage.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00005'] = currentMap\n loadMaps['ANPR.AnnealStage.function'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00005'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'linear'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008')\n\n # Attribute AnnealStage.functionParams\n currentMap = {}\n contentMap['functionParams'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00006'] = currentMap\n loadMaps['ANPR.AnnealStage.functionParams'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.functionParams'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00006'\n currentMap['name'] = 'functionParams'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute AnnealStage.initialTemp\n currentMap = {}\n contentMap['initialTemp'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00013'] = currentMap\n loadMaps['ANPR.AnnealStage.initialTemp'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.initialTemp'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00013'\n currentMap['name'] = 'initialTemp'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Attribute AnnealStage.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00011'] = currentMap\n loadMaps['ANPR.AnnealStage.name'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00011'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnnealStage.numSteps\n currentMap = {}\n contentMap['numSteps'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00012'] = currentMap\n loadMaps['ANPR.AnnealStage.numSteps'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.numSteps'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00012'\n currentMap['name'] = 'numSteps'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001')\n\n # Attribute AnnealStage.numSubSteps\n currentMap = {}\n contentMap['numSubSteps'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00004'] = currentMap\n loadMaps['ANPR.AnnealStage.numSubSteps'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.numSubSteps'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00004'\n currentMap['name'] = 'numSubSteps'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001')\n\n # Attribute AnnealStage.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00010'] = currentMap\n loadMaps['ANPR.AnnealStage.serial'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00010'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AnnealStage.timeStep\n currentMap = {}\n contentMap['timeStep'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-14:33:51_00001'] = currentMap\n loadMaps['ANPR.AnnealStage.timeStep'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.timeStep'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-14:33:51_00001'\n currentMap['name'] = 'timeStep'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute AnnealStage.timeStepScaling\n currentMap = {}\n contentMap['timeStepScaling'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-14:47:43_00001'] = currentMap\n loadMaps['ANPR.AnnealStage.timeStepScaling'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.timeStepScaling'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-14:47:43_00001'\n currentMap['name'] = 'timeStepScaling'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Role AnnealStage.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnnealStage.potentialScales\n currentMap = {}\n contentMap['potentialScales'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00015'] = currentMap\n loadMaps['ANPR.AnnealStage.potentialScales'] = currentMap\n currentMap['tag'] = 'ANPR.AnnealStage.potentialScales'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00015'\n currentMap['name'] = 'potentialScales'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of AnnealStage\n\n currentMap = abstractTypes.get('AnnealStage')\n aList = ['finalTemp', 'function', 'initialTemp', 'numSteps', 'numSubSteps', 'serial', 'timeStep', 'timeStepScaling']\n currentMap['headerAttrs'] = aList\n aList = ['functionParams', 'name', 'potentialScales']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class EnergyTerm\n currentMap = {}\n abstractTypes['EnergyTerm'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00006'] = currentMap\n loadMaps['ANPR.EnergyTerm'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'energyTerms'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = molsim.api.AnnealProtocol.EnergyTerm\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute EnergyTerm.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute EnergyTerm.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00001'] = currentMap\n loadMaps['ANPR.EnergyTerm.code'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00001'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute EnergyTerm.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00011'] = currentMap\n loadMaps['ANPR.EnergyTerm.details'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00011'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute EnergyTerm.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00006'] = currentMap\n loadMaps['ANPR.EnergyTerm.name'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute EnergyTerm.serial\n currentMap = {}\n 
contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00005'] = currentMap\n loadMaps['ANPR.EnergyTerm.serial'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Role EnergyTerm.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role EnergyTerm.potentialScales\n currentMap = {}\n contentMap['potentialScales'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00002'] = currentMap\n loadMaps['ANPR.EnergyTerm.potentialScales'] = currentMap\n currentMap['tag'] = 'ANPR.EnergyTerm.potentialScales'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:43:08_00002'\n currentMap['name'] = 'potentialScales'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n # End of EnergyTerm\n\n currentMap = abstractTypes.get('EnergyTerm')\n aList = ['serial']\n currentMap['headerAttrs'] = aList\n aList = ['code', 'details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['potentialScales', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['potentialScales']\n currentMap['children'] = aList\n\n # Class PotentialScale\n currentMap = {}\n abstractTypes['PotentialScale'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00007'] = currentMap\n loadMaps['ANPR.PotentialScale'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00007'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'potentialScales'\n currentMap['class'] = molsim.api.AnnealProtocol.PotentialScale\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute PotentialScale.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute PotentialScale.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-14:31:19_00011'] = currentMap\n loadMaps['ANPR.PotentialScale.code'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-14:31:19_00011'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = 'std'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute PotentialScale.finalScale\n currentMap = {}\n contentMap['finalScale'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00018'] = currentMap\n loadMaps['ANPR.PotentialScale.finalScale'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.finalScale'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00018'\n currentMap['name'] = 'finalScale'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Attribute PotentialScale.function\n currentMap = {}\n contentMap['function'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00019'] = currentMap\n loadMaps['ANPR.PotentialScale.function'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00019'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'linear'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00008')\n\n # Attribute PotentialScale.functionParams\n currentMap = {}\n contentMap['functionParams'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00020'] = currentMap\n loadMaps['ANPR.PotentialScale.functionParams'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.functionParams'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00020'\n currentMap['name'] = 'functionParams'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute PotentialScale.initialScale\n currentMap = {}\n contentMap['initialScale'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00017'] = currentMap\n loadMaps['ANPR.PotentialScale.initialScale'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.initialScale'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00017'\n currentMap['name'] = 'initialScale'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role PotentialScale.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role PotentialScale.annealStage\n currentMap = {}\n contentMap['annealStage'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00016'] = currentMap\n loadMaps['ANPR.PotentialScale.annealStage'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.annealStage'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:09_00016'\n currentMap['name'] = 'annealStage'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n\n # Role PotentialScale.refPotentialTerm\n currentMap = {}\n contentMap['refPotentialTerm'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00010'] = currentMap\n loadMaps['ANPR.PotentialScale.refPotentialTerm'] = currentMap\n currentMap['tag'] = 'ANPR.PotentialScale.refPotentialTerm'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00010'\n currentMap['name'] = 'refPotentialTerm'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n # End of PotentialScale\n\n currentMap = abstractTypes.get('PotentialScale')\n aList = ['finalScale', 'function', 'initialScale']\n currentMap['headerAttrs'] = aList\n aList = ['code', 'functionParams']\n currentMap['simpleAttrs'] = aList\n aList = ['annealStage', 'refPotentialTerm']\n currentMap['optLinks'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefPotentialTerm\n currentMap = {}\n abstractTypes['RefPotentialTerm'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00009'] = currentMap\n loadMaps['ANPR.RefPotentialTerm'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm'\n currentMap['type'] = 'class'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00009'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refPotentialTerms'\n currentMap['class'] = molsim.api.AnnealProtocol.RefPotentialTerm\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefPotentialTerm.application\n currentMap = {}\n contentMap['application'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00004'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.application'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.application'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00004'\n currentMap['name'] = 'application'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = 'general'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefPotentialTerm.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefPotentialTerm.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00005'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.code'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00005'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefPotentialTerm.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00002'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.details'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00002'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute RefPotentialTerm.index\n currentMap = {}\n contentMap['index'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00006'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.index'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.index'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00006'\n currentMap['name'] = 'index'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001')\n\n # Attribute RefPotentialTerm.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00001'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.name'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role RefPotentialTerm.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefPotentialTerm.potentialScales\n currentMap = {}\n contentMap['potentialScales'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00009'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.potentialScales'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.potentialScales'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-06-13:18:31_00009'\n currentMap['name'] = 'potentialScales'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role RefPotentialTerm.refTermParameters\n currentMap = {}\n contentMap['refTermParameters'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00003'] = currentMap\n loadMaps['ANPR.RefPotentialTerm.refTermParameters'] = currentMap\n currentMap['tag'] = 'ANPR.RefPotentialTerm.refTermParameters'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00003'\n currentMap['name'] = 'refTermParameters'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ANPR').get('abstractTypes')\n # End of RefPotentialTerm\n\n currentMap = abstractTypes.get('RefPotentialTerm')\n aList = ['index']\n currentMap['headerAttrs'] = aList\n aList = ['application', 'code', 'details', 'name', 'potentialScales']\n currentMap['simpleAttrs'] = aList\n aList = ['refTermParameters', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['refTermParameters']\n currentMap['children'] = aList\n\n # Class RefTermParameter\n currentMap = {}\n abstractTypes['RefTermParameter'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:32:58_00001'] = currentMap\n loadMaps['ANPR.RefTermParameter'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:32:58_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refTermParameters'\n currentMap['objkey'] = 'code'\n currentMap['class'] = molsim.api.AnnealProtocol.RefTermParameter\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefTermParameter.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefTermParameter.code\n currentMap = {}\n contentMap['code'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00007'] = currentMap\n loadMaps['ANPR.RefTermParameter.code'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.code'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00007'\n currentMap['name'] = 'code'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefTermParameter.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00010'] = currentMap\n loadMaps['ANPR.RefTermParameter.details'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00010'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute RefTermParameter.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00009'] = currentMap\n 
loadMaps['ANPR.RefTermParameter.name'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00009'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute RefTermParameter.value\n currentMap = {}\n contentMap['value'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00008'] = currentMap\n loadMaps['ANPR.RefTermParameter.value'] = currentMap\n currentMap['tag'] = 'ANPR.RefTermParameter.value'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:49:37_00008'\n currentMap['name'] = 'value'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Role RefTermParameter.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of RefTermParameter\n\n currentMap = abstractTypes.get('RefTermParameter')\n aList = ['value']\n currentMap['headerAttrs'] = aList\n aList = ['code', 'details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnnealProtocol\n currentMap = {}\n exolinks['AnnealProtocol'] = currentMap\n loadMaps['ANPR.exo-AnnealProtocol'] = currentMap\n currentMap['tag'] = 'ANPR.exo-AnnealProtocol'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00004'\n currentMap['name'] = 'AnnealProtocol'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocol\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to AnnealProtocolStore\n currentMap = {}\n exolinks['AnnealProtocolStore'] = currentMap\n loadMaps['ANPR.exo-AnnealProtocolStore'] = currentMap\n currentMap['tag'] = 'ANPR.exo-AnnealProtocolStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00003'\n currentMap['name'] = 'AnnealProtocolStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealProtocolStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to AnnealStage\n currentMap = {}\n exolinks['AnnealStage'] = currentMap\n loadMaps['ANPR.exo-AnnealStage'] = currentMap\n currentMap['tag'] = 'ANPR.exo-AnnealStage'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00005'\n currentMap['name'] = 'AnnealStage'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.AnnealStage\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to EnergyTerm\n currentMap = {}\n 
exolinks['EnergyTerm'] = currentMap\n loadMaps['ANPR.exo-EnergyTerm'] = currentMap\n currentMap['tag'] = 'ANPR.exo-EnergyTerm'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00006'\n currentMap['name'] = 'EnergyTerm'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.EnergyTerm\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to PotentialScale\n currentMap = {}\n exolinks['PotentialScale'] = currentMap\n loadMaps['ANPR.exo-PotentialScale'] = currentMap\n currentMap['tag'] = 'ANPR.exo-PotentialScale'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00007'\n currentMap['name'] = 'PotentialScale'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.PotentialScale\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(globalMap.get('ANPR').get('exolinks'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to RefPotentialTerm\n currentMap = {}\n exolinks['RefPotentialTerm'] = currentMap\n loadMaps['ANPR.exo-RefPotentialTerm'] = currentMap\n currentMap['tag'] = 'ANPR.exo-RefPotentialTerm'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-05-13:42:05_00009'\n currentMap['name'] = 'RefPotentialTerm'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.RefPotentialTerm\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001'))\n\n # Out-of-package link to RefTermParameter\n currentMap = {}\n exolinks['RefTermParameter'] = currentMap\n loadMaps['ANPR.exo-RefTermParameter'] = currentMap\n currentMap['tag'] = 'ANPR.exo-RefTermParameter'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-02-20-13:32:58_00001'\n currentMap['name'] = 'RefTermParameter'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = molsim.api.AnnealProtocol.RefTermParameter\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))",
"def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out",
"def create_group_dict():\n ifile = house_keeping + 'msid_list_all'\n data = mcf.read_data_file(ifile)\n\n catg_dict = {}\n for ent in data:\n atemp = re.split('\\s+', ent)\n catg_dict[atemp[0].strip()] = atemp[1].strip()\n\n return catg_dict",
"def build_doctype_map(self):\n\t\tself.doctype_map = {}\n\n\t\tactive_domains = frappe.get_active_domains()\n\t\tall_doctypes = frappe.get_all(\n\t\t\t\"DocType\",\n\t\t\tfields=[\n\t\t\t\t\"name\",\n\t\t\t\t\"in_create\",\n\t\t\t\t\"module\",\n\t\t\t\t\"istable\",\n\t\t\t\t\"issingle\",\n\t\t\t\t\"read_only\",\n\t\t\t\t\"restrict_to_domain\",\n\t\t\t],\n\t\t)\n\n\t\tfor dt in all_doctypes:\n\t\t\tif not dt.restrict_to_domain or (dt.restrict_to_domain in active_domains):\n\t\t\t\tself.doctype_map[dt[\"name\"]] = dt",
"def get_type_mappings(platform):\n types = platforms[platform][TYPES]\n\n mappings = {}\n for _type in types:\n mappings[_type] = types[_type]\n return mappings",
"def mk_obj():\n\n def make_dict():\n return collections.defaultdict(make_dict);\n\n def set_path(d, path, value):\n if path[-1] in ['reset', 'issue', 'en']: return\n for key in path[:-1]:\n d = d[key]\n d[path[-1]] = value\n # add a placeholder for where the register's value\n # (other keys are metadata about this value)\n d[path[-1]]['value'] = 0\n\n the_dict = make_dict()\n for register in configuration.registers:\n set_path(\n the_dict,\n register.split('_'),\n configuration.registers[register],\n )\n\n return the_dict",
"def group_bases(all_bases):\n groups = {}\n for base in all_bases:\n group = base['Group']\n if group in groups:\n groups[group].append(base)\n else:\n groups[group] = [base]\n return groups",
"def _known_microarchitectures():\n # pylint: disable=fixme\n # TODO: Simplify this logic using object_pairs_hook to OrderedDict\n # TODO: when we stop supporting python2.6\n\n def fill_target_from_dict(name, data, targets):\n \"\"\"Recursively fills targets by adding the micro-architecture\n passed as argument and all its ancestors.\n\n Args:\n name (str): micro-architecture to be added to targets.\n data (dict): raw data loaded from JSON.\n targets (dict): dictionary that maps micro-architecture names\n to ``Microarchitecture`` objects\n \"\"\"\n values = data[name]\n\n # Get direct parents of target\n parent_names = values[\"from\"]\n for parent in parent_names:\n # Recursively fill parents so they exist before we add them\n if parent in targets:\n continue\n fill_target_from_dict(parent, data, targets)\n parents = [targets.get(parent) for parent in parent_names]\n\n vendor = values[\"vendor\"]\n features = set(values[\"features\"])\n compilers = values.get(\"compilers\", {})\n generation = values.get(\"generation\", 0)\n\n targets[name] = Microarchitecture(\n name, parents, vendor, features, compilers, generation\n )\n\n known_targets = {}\n data = archspec.cpu.schema.TARGETS_JSON[\"microarchitectures\"]\n for name in data:\n if name in known_targets:\n # name was already brought in as ancestor to a target\n continue\n fill_target_from_dict(name, data, known_targets)\n\n # Add the host platform if not present\n host_platform = platform.machine()\n known_targets.setdefault(host_platform, generic_microarchitecture(host_platform))\n\n return known_targets",
"def build_network_definition(rsn_oms):\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"build_network_definition. rsn_oms class: %s\",\n rsn_oms.__class__.__name__)\n\n # platform types:\n platform_types = rsn_oms.config.get_platform_types()\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"got platform_types %s\", str(platform_types))\n\n # platform map:\n map = rsn_oms.config.get_platform_map()\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"got platform map %s\", str(map))\n\n # build topology:\n pnodes = NetworkUtil.create_node_network(map)\n dummy_root = pnodes['']\n root_pnode = pnodes[dummy_root.subplatforms.keys()[0]]\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"topology's root platform_id=%r\", root_pnode.platform_id)\n\n # now, populate the attributes and ports for the platforms\n\n def build_attributes_and_ports(pnode):\n \"\"\"\n Recursive routine to call set_attributes and set_ports on each pnode.\n \"\"\"\n set_attributes(pnode)\n set_ports(pnode)\n\n for sub_platform_id, sub_pnode in pnode.subplatforms.iteritems():\n build_attributes_and_ports(sub_pnode)\n\n def set_attributes(pnode):\n platform_id = pnode.platform_id\n attr_infos = rsn_oms.attr.get_platform_attributes(platform_id)\n if not isinstance(attr_infos, dict):\n raise PlatformDriverException(\n \"%r: get_platform_attributes returned: %s\" % (\n platform_id, attr_infos))\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: attr_infos: %s\", platform_id, attr_infos)\n\n if not platform_id in attr_infos:\n raise PlatformDriverException(\n \"%r: get_platform_attributes response does not \"\n \"include entry for platform_id: %s\" %(\n platform_id, attr_infos))\n\n ret_infos = attr_infos[platform_id]\n for attrName, attr_defn in ret_infos.iteritems():\n attr = AttrNode(attrName, attr_defn)\n pnode.add_attribute(attr)\n\n def set_ports(pnode):\n platform_id = pnode.platform_id\n port_infos = rsn_oms.port.get_platform_ports(platform_id)\n if not isinstance(port_infos, dict):\n raise PlatformDriverException(\n \"%r: get_platform_ports response is not a dict: %s\" % (\n platform_id, port_infos))\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: port_infos: %s\", platform_id, port_infos)\n\n if not platform_id in port_infos:\n raise PlatformDriverException(\n \"%r: get_platform_ports response does not include \"\n \"platform_id: %s\" % (platform_id, port_infos))\n\n ports = port_infos[platform_id]\n\n if not isinstance(ports, dict):\n raise PlatformDriverException(\n \"%r: get_platform_ports: entry for platform_id is \"\n \"not a dict: %s\" % (platform_id, ports))\n\n for port_id, dic in ports.iteritems():\n port = PortNode(port_id, dic['network'])\n port.set_state(dic['state'])\n pnode.add_port(port)\n\n # add connected instruments:\n instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)\n if not isinstance(instrs_res, dict):\n log.warn(\"%r: port_id=%r: get_connected_instruments \"\n \"response is not a dict: %s\" % (platform_id, port_id, instrs_res))\n continue\n\n if log.isEnabledFor(logging.TRACE):\n log.trace(\"%r: port_id=%r: get_connected_instruments \"\n \"returned: %s\" % (platform_id, port_id, instrs_res))\n\n if not platform_id in instrs_res:\n raise PlatformDriverException(\n \"%r: port_id=%r: get_connected_instruments response\"\n \"does not have entry for platform_id: %s\" % (\n platform_id, ports))\n\n if not port_id in instrs_res[platform_id]:\n raise PlatformDriverException(\n \"%r: port_id=%r: get_connected_instruments response \"\n \"for platform_id does 
not have entry for port_id: %s\" % (\n platform_id, port_id, instrs_res[platform_id]))\n\n instr = instrs_res[platform_id][port_id]\n for instrument_id, attrs in instr.iteritems():\n port.add_instrument(InstrumentNode(instrument_id, attrs))\n\n # call the recursive routine\n build_attributes_and_ports(root_pnode)\n\n # we got our whole network including platform attributes and ports.\n\n # and finally create and return NetworkDefinition:\n ndef = NetworkDefinition()\n ndef._platform_types = platform_types\n ndef._pnodes = pnodes\n ndef._dummy_root = dummy_root\n return ndef",
"def get_domains(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n domains = {}\n\n # add all domain triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.domain, None)):\n if subject in property_to_id and object in entity_type_to_id:\n domains[property_to_id[subject]] = entity_type_to_id[object]\n\n return domains"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Boolean function to check if a given architecture is exclusive. | def exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):
if len(pathogen_groups_set) == 1:
return True
# Only check pathogen grouping when the flag is on
if collapse_pathogen_groups:
if len(pathogen_groups_set) > 2:
return False
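        # Groups 0/1 and 3/4 each collapse into one pathogen class, so a set spanning such a pair still counts as exclusive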
if 0 in pathogen_groups_set and 1 in pathogen_groups_set:
return True
if 3 in pathogen_groups_set and 4 in pathogen_groups_set:
return True
return False | [
"def IsExclusive(self):\n return False",
"def isCheckedOutExclusive(self) -> bool:\n ...",
"def exclusive_state(self) -> bool:\n return pulumi.get(self, \"exclusive_state\")",
"def is_skip_and_return_zero_patch_available(\n\t self, addr: int, arch: Optional['architecture.Architecture'] = None\n\t) -> bool:\n\t\tif arch is None:\n\t\t\tif self.arch is None:\n\t\t\t\traise Exception(\"Attempting to call can_assemble without an Architecture specified\")\n\t\t\tarch = self.arch\n\t\treturn core.BNIsSkipAndReturnZeroPatchAvailable(self.handle, arch.handle, addr)",
"def is_infrastructure (self):\n return sum([1 for i in self.infras]) != 0",
"def is_exclusive(self):\n return self.exclusive",
"def is_skip_and_return_value_patch_available(\n\t self, addr: int, arch: Optional['architecture.Architecture'] = None\n\t) -> bool:\n\t\tif arch is None:\n\t\t\tif self.arch is None:\n\t\t\t\traise Exception(\"Attempting to call can_assemble without an Architecture specified\")\n\t\t\tarch = self.arch\n\t\treturn core.BNIsSkipAndReturnValuePatchAvailable(self.handle, arch.handle, addr)",
"def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)",
"def _is_unlocked():\n return ctx.operation.name in \\\n ctx.instance.runtime_properties.get('unlock', [])",
"def osarch_is_amd64():\n return osarch_match(\"amd64\")",
"def _is_disabled(self):\n start_month = self.internship.start_month\n freeze_count = self.intern.freezes.count()\n if self.month - start_month > (11 + freeze_count):\n return True\n elif self.intern.profile.intern.is_outside_intern and not self.occupied and not (self.has_rotation_request or self.has_rotation_cancel_request):\n rotation_count = self.internship.rotations.count()\n request_count = self.internship.rotation_requests.open().count()\n return rotation_count + request_count >= 6\n return False # FIXME: Investigate why this actually works!",
"def check_masked(self):\n if self._alternate == 'N': # If our alternate allele is masked, or an 'N'\n return True # Return True\n else: # Otherwise\n return False # Return False",
"def osarch_is_amd64():\n return osarch_match(\"amd64\")",
"def isShiftHeld():\n return False if pm.about(batch=True) else (pm.getModifiers() & 1) > 0",
"def exclusive_ip(self) -> bool:\n return pulumi.get(self, \"exclusive_ip\")",
"def is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...",
"def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True",
"def get_bool_master_unlock(ML):\r\n new_ML = ML\r\n if new_ML == \"1\":\r\n return True\r\n else:\r\n print(\"Master lock active. Doors locked\")\r\n return False",
"def is_gate(self):\n return not self.is_be()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the employee has rejoined, otherwise False | def is_rejoinee(self):
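    # More than one recorded start date means the employee left and later rejoined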
return len(self._start_date) > 1 | [
"def is_employee():\n return _is_member('uw_employee')",
"def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return (self.date_joined + expiration_date <= datetime.datetime.now())",
"def already_booked(slots, attendees, user_name):\n already_joined = False\n for i in attendees:\n if i[\"email\"] == user_name+'@student.wethinkcode.co.za':\n already_joined = True\n\n if already_joined == True:\n return False\n else:\n return True",
"def check_old_employee_room(self, employee_name):\n for rooms in self.offices + self.living_spaces:\n if employee_name in [\n occupants for occupants in rooms.room_occupants]:\n return rooms",
"def is_emperor(user_id: int, table_id: int) -> bool:\n table = Table.query.get(table_id)\n return table.emperor == user_id",
"def activation_expired(self):\n return self.date_joined + timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) < timezone.now()",
"def checkDaysOff_old(roster):\r\n\r\n for i in roster.employees:\r\n ename = i.fName + \" \" + i.lName\r\n onList, offList = roster.getOnOffDays(ename)\r\n\r\n # for each offday or vacation day\r\n for j in offList:\r\n shiftassignments = 0 # counts how many shifts have been assigned to employee per day\r\n\r\n # for each shift\r\n for k in roster.shiftTypes:\r\n\r\n # check if employee i works shift k on day j\r\n # make a list with working employees for the day and shift\r\n EmployeesWorkingShiftThatDay = roster.getWorkingEmployees(j, k['name'])\r\n\r\n # check if current employee i is in list\r\n for e in EmployeesWorkingShiftThatDay:\r\n if (e == ename):\r\n shiftassignments = shiftassignments + 1\r\n\r\n # if one employee has more than zero shifts per day assigned on his/her day off, return false\r\n if (shiftassignments > 0):\r\n return False\r\n\r\n return True",
"def activation_key_expired(self):\r\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\r\n return self.activation_key == self.ACTIVATED or \\\r\n (self.user.date_joined + expiration_date <= datetime.datetime.now())",
"def activation_key_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return self.activation_key == RegistrationProfile.ACTIVATED or \\\n (self.user.date_joined + expiration_date <= datetime.datetime.now())",
"def isOn(self):\r\n return len(self.__agenda)>2",
"def is_student_employee():\n return _is_member('uw_affiliation_student-employee')",
"def hr_refuse(self):\n if not self.state == 'notice' and not self.state == 'exit' and not self.state == 'manager_review':\n self.status_refuse_hr()\n template = self.env.ref('vertiple__employee.hr_review_to_emp')\n self.env['mail.template'].browse(template.id).send_mail(self.id)\n else:\n raise ValidationError(\"You are already in %s\" % self.state)",
"def is_joined_days_passed(self, days):\n return timezone.now() >= self.user.date_joined + timedelta(days=days)",
"def _is_last_admin_leaving(\n event: EventBase,\n power_level_content: dict,\n state_events: StateMap[EventBase],\n) -> bool:\n # Get every admin user defined in the room's state\n admin_users = {\n user\n for user, power_level in power_level_content[\"users\"].items()\n if power_level >= 100\n }\n\n if event.sender not in admin_users:\n # This user is not an admin, ignore them\n return False\n\n if any(\n event_type == EventTypes.Member\n and event.membership in [Membership.JOIN, Membership.INVITE]\n and state_key in admin_users\n and state_key != event.sender\n for (event_type, state_key), event in state_events.items()\n ):\n # There's another admin user in, or invited to, the room\n return False\n\n return True",
"def __eq__(self, other: Employee) -> bool:\n\n return self.eid == other.eid",
"def _registration_abused(self):\n\n users_in_last_day = get_user_model().objects.filter(\n date_joined__gte=date.today()\n )\n\n if len(users_in_last_day) >= 50:\n return True\n else:\n return False",
"def could_do_this_job(s,e): #shift,start time,employee\n if(s.job_id not in e.jobs): #in array check employee tru and false\n return(False)\n for t in e.availability:\n if in_time(s.time, t):\n return(True)\n return(False)",
"def tiene_rol(self):\n assert (self.rol is None and not self.permisos_por_fase.all().exists()) or (self.rol is not None)\n return self.usuario == self.proyecto.gerente or self.rol is not None",
"def is_entitlement_redeemable(self, entitlement):\n # This is < because a get_days_since_created of expiration_period means that that many days have passed,\n # which should then expire the entitlement\n return (entitlement.get_days_since_created() < self.expiration_period.days\n and not entitlement.enrollment_course_run\n and not entitlement.expired_at)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process the exit of an employee | def process_employee_exit(self):
if self.is_employee_serving():
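        # Record the exit timestamp (ISO 8601) for the employee's current tenure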
self._end_date.append(datetime.now().isoformat())
print(f"Successfully processed exit for employee {self.name} on" \
f"{self._end_date[-1]}\nWe wish {self.name} for future endeavours")
return
raise RejoiningException("Employee not in service. Cannot process exit.") | [
"def identify_result_exit(self, record):\n return [\"exit\"]",
"def _handler_generic_exit(self):",
"def _common_state_exit(self, *args, **kwargs):",
"def _exit(n):\n pass",
"def exit(self, status=0,message=None):\n\t\tpass",
"def on_exit_step(self) -> Event:\n return self._on_exit_step",
"def exit_eeve(exit_value: int = 0):\n print('unregistering')\n for event in eeve.events:\n event.unregister()\n\n print('exiting')\n sys.exit(exit_value)",
"def defaultExit(self):",
"def __exit__(self, exc_type, exc_value, exc_tb):\n self.get(\"/exit\", log_errors=False)",
"def exit_program():\n\n today = date.today()\n current_date = today.strftime(\"%d/%m/%Y\")\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print('Ending program : Customer_Search_Name.py - at : ' + current_time + ' on : ' + current_date)\n sys.exit()",
"def _exit(self, save_vars):\n raise NotImplementedError()",
"def exit(self): \n self.teo_exchange_intent = self.teo_wallet\n self.withdraw_intent = self.euro_wallet\n\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)\n\n if self.teo_wallet + self.euro_wallet == 0:\n print('Agent exited: ', self.__class__.__name__)\n self.model.schedule.remove(self)",
"def ConsoleExit(self, errorcode=200):\n pass",
"def exit(self):\n print(\"\\n***************************** Exit Metafor *****************************\")",
"def on_exit(self) -> Event:\n return self._on_exit",
"def exit(self):\n self.XY.exit()\n self.Z.exit()",
"def afk_exit(self):\n self._new_afk(\"exits\")",
"def on_exit(cmd: pcmd.Command, args: List[str]) -> None:\n sh.exit()",
"def clicked_exit():\r\n write_person_event(staff_data, 'depart') # write event to the log\r\n person_popup.destroy()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
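A minimal, self-contained sketch of the exit flow in the record above, using a tiny stand-in class because the real Employee class is not shown here; the class name, constructor, and sample data are assumptions for illustration only.

# Hedged sketch: _DemoEmployee is a stand-in, not the record's real class.
from datetime import datetime

class RejoiningException(Exception):
    pass

class _DemoEmployee:
    def __init__(self, name):
        self.name = name
        self._end_date = []

    def is_employee_serving(self):
        # Serving as long as no end date has been recorded.
        return not self._end_date

    def process_employee_exit(self):
        if self.is_employee_serving():
            self._end_date.append(datetime.now().isoformat())
            print(f"Successfully processed exit for employee {self.name} on "
                  f"{self._end_date[-1]}\nWe wish {self.name} well in their future endeavours")
            return
        raise RejoiningException("Employee not in service. Cannot process exit.")

employee = _DemoEmployee("Ada")
employee.process_employee_exit()      # first call succeeds
try:
    employee.process_employee_exit()  # second call raises
except RejoiningException as exc:
    print(exc)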
Takes a full media url from Bandwidth and extracts the media id | def get_media_id(media_url):
split_url = media_url.split("/")
#Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png
if split_url[-2] == "media":
return split_url[-1]
#Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png
else:
#This is required for now due to the SDK parsing out the `/`s
return "%2F".join(split_url[-3:]) | [
"def extract_media_id(self, s):\n return int(re.findall(r\"_(\\d+).ts\", s)[0])",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id",
"def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:\n try:\n # The type on postpath seems incorrect in Twisted 21.2.0.\n postpath: List[bytes] = request.postpath # type: ignore\n assert postpath\n\n # This allows users to append e.g. /test.png to the URL. Useful for\n # clients that parse the URL to see content type.\n server_name_bytes, media_id_bytes = postpath[:2]\n server_name = server_name_bytes.decode(\"utf-8\")\n media_id = media_id_bytes.decode(\"utf8\")\n\n # Validate the server name, raising if invalid\n parse_and_validate_server_name(server_name)\n\n file_name = None\n if len(postpath) > 2:\n try:\n file_name = urllib.parse.unquote(postpath[-1].decode(\"utf-8\"))\n except UnicodeDecodeError:\n pass\n return server_name, media_id, file_name\n except Exception:\n raise SynapseError(\n 404, \"Invalid media id token %r\" % (request.postpath,), Codes.UNKNOWN\n )",
"def get_media_id_by_tag(self, tag):\n\n if self.login_status:\n try:\n if tag.startswith(\"l:\"):\n tag = tag.replace(\"l:\", \"\")\n self.logger.info(f\"Get Media by location: {tag}\")\n url_location = self.url_location % (tag)\n r = self.s.get(url_location)\n all_data = json.loads(r.text)\n self.media_by_tag = list(\n all_data[\"graphql\"][\"location\"][\"edge_location_to_media\"][\n \"edges\"\n ]\n )\n\n else:\n self.logger.debug(f\"Get Media by tag: {tag}\")\n url_tag = self.url_tag % (tag)\n r = self.s.get(url_tag)\n all_data = json.loads(r.text)\n self.media_by_tag = list(\n all_data[\"graphql\"][\"hashtag\"][\"edge_hashtag_to_media\"][\n \"edges\"\n ]\n )\n except Exception as exc:\n self.media_by_tag = []\n self.logger.warning(\"Except on get_media!\")\n self.logger.exception(exc)",
"def get_instagram_url_from_media_id(self, media_id, url_flag=True, only_code=None):\n media_id = int(media_id)\n if url_flag is False:\n return \"\"\n\n alphabet = (\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\"\n )\n shortened_id = \"\"\n while media_id > 0:\n media_id, idx = divmod(media_id, 64)\n shortened_id = alphabet[idx] + shortened_id\n\n if only_code:\n return shortened_id\n\n return f\"instagram.com/p/{shortened_id}/\"",
"def get_single_media(media_id):\n return query_single(media_id, Media, media_schema)",
"def get_media_id_from_post(media_obj):\n if media_obj:\n media_id = media_obj.get('id')\n return media_id\n return",
"def parse_movie_id(douban_url):\n matched = Movie.__movie_url_pattern.match(douban_url)\n if matched is None:\n raise UrlParseException(douban_url)\n # This looks like a good page. Remove query parameters.\n matched = Movie.__param_removal_pattern.match(douban_url)\n if matched is not None:\n movie_id = matched.group(1)\n else:\n raise UrlParseException(douban_url)\n return movie_id",
"def _get_movie_id(self, movie_id_link):\n #return re.sub('\\/movies\\/\\?id\\=', '', movie_id_link)\n return movie_id_link.split('id=')[1]",
"def get_id(url):\n if \"gfycat\" in url:\n match = re.search(r'^https?:\\/\\/(?:www.)?gfycat.com\\/ifr\\/([A-Za-z0-9\\-_]+)(?:\\/)?(?:\\?.*)?$', url)\n if match:\n return match.group(1)\n return None",
"def media_content_id(self):\n return self._currentsong.get(\"file\")",
"def get_media_filename(media_url):\n return media_url.split(\"/\")[-1]",
"def extract_imgur_id(url):\n\n pos = url.rfind('/')\n \n if pos >= 0:\n id_str = url[pos+1:]\n\n pos = id_str.rfind('.')\n if pos > 0:\n id_str = id_str[:pos]\n\n return id_str\n\n else:\n raise InvalidImgurUrlException(\"Failed to parse Imgur url\")",
"def get_id(url):\n\n return re.search(GET_ID_REGEX_URL, url)[0]",
"def _get_id(self, id, url):\n if isinstance(id, list):\n imgur_match = id\n elif isinstance(id, str):\n return id\n else:\n imgur_match = REPatterns.imgur.findall(url)[0]\n id = None\n # Try block for catching imgur 404s\n try:\n if imgur_match[4]: # Image match\n id = imgur_match[4]\n elif imgur_match[3]: # Gallery match\n gallery = imgur.gallery_item(imgur_match[3])\n if not isinstance(gallery, GalleryImage):\n id = gallery.images[0]['id'] # First image from gallery album\n else:\n id = gallery.id\n elif imgur_match[2]: # Album match\n album = imgur.get_album(imgur_match[2])\n id = album.images[0]['id'] # First image of album\n\n self.pic = imgur.get_image(id) # take first image from gallery album\n\n except ImgurClientError as e:\n print(\"Imgur returned 404, deleted image?\", e.status_code)\n self.pic = None\n id = None\n\n return id",
"def extract_id(cls, link):\n\t\treturn link.split('/')[-1]",
"def extract_item_id(url):\n m = re.search('/([0-9]+)\\.htm', url)\n if m is not None:\n return m.group(1)\n else:\n return None",
"def parse_link_to_id(self, playlist_link: str) -> str:\n split_1 = playlist_link.split('/')[4]\n split_2 = split_1.split('?')\n return split_2[0]",
"def get_channel_id(sess, url):\n\tcontent = sess.get(url).content\n\tsoup = BeautifulSoup(content, 'html.parser')\n\treturn soup.find(\"meta\", attrs={'name':\"ustream:channel_id\"})[\"content\"]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
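A quick, illustrative check of how get_media_id (defined just above) treats the two URL shapes called out in its comments; the account and media ids below are made up.

# Placeholder URLs only; the ids are not real account data.
print(get_media_id("https://messaging.bandwidth.com/api/v2/users/123/media/file.png"))
# -> file.png
print(get_media_id("https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png"))
# -> abc%2F0%2Ffile.png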
Takes a full media url from Bandwidth and extracts the filename | def get_media_filename(media_url):
return media_url.split("/")[-1] | [
"def get_content_name(self, content_url):\n endpoint = content_url.split('/')[-1]\n return re.match(r'(.+\\.(?:jpg|mp4))', endpoint).group(0)",
"def getImageFilename(url):\n head, filename = url.rsplit('/', 1)\n return filename",
"def get_media_id(media_url):\n split_url = media_url.split(\"/\")\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/file.png\n if split_url[-2] == \"media\":\n return split_url[-1]\n #Media urls of the format https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png\n else:\n #This is required for now due to the SDK parsing out the `/`s\n return \"%2F\".join(split_url[-3:])",
"def get_url_filename(url: str) -> str:\n parse_result = urlparse(url)\n return os.path.basename(parse_result.path)",
"def get_filename(url):\r\n if url.find('/'):\r\n return url.rsplit('/', 1)[1] #'/', 1 tekee sen etta valitsee viimeisen osan \r\n #tiedoston nimen polusta\r",
"def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name",
"def url_file_name(url):\r\n return url[url.rfind('/') + 1:]",
"def get_filename(url: str) -> str:\n filename: str = ''\n\n # Check to see if one of the valid image extensions exist in the URL.\n # If it does, split the URL at the '/' character and remove any\n # characters after the extension\n for ext in IMG_FORMATS:\n if ext in url:\n index = url.index(ext) + len(ext)\n clean_name = url[:index]\n filename = clean_name.split('/')[-1]\n \n return filename",
"def get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename",
"def get_filename(target_dir, filename_prefix):\n # this whole function is not the nicest thing, but isolating it makes\n # things clearer , a good refactoring would be to get\n # the info from the video_url or the current output, to avoid the\n # iteration from the current dir\n filenames = os.listdir(target_dir)\n subs_filename = filename_prefix\n for name in filenames: # Find the filename of the downloaded video\n if name.startswith(filename_prefix):\n (basename, ext) = os.path.splitext(name)\n return basename",
"def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname",
"def get_filename(url):\n path = urlparse.urlparse(url).path\n last_component = path.split('/')[-1]\n return last_component",
"def get_audiobook_name(url):\n return url.split('/')[-1]",
"def get_filename(url, dir_=\"\"):\n return path.join(dir_, path.basename(urlsplit(url).path))",
"def get_name_image(url):\n name_image = url.split(u'/')[-1]\n return name_image",
"def get_download_filename():\n max_length = 200\n query = job.meta[\"query\"]\n extension = os.path.splitext(job.result)[1]\n if len(query) > max_length:\n name = query[:max_length] + \"_etc\"\n else:\n name = query\n\n if job.meta[\"format\"] == \"list\":\n return get_valid_filename(name + \".\" + job.meta[\"format\"])\n else:\n return get_valid_filename(name + \".\" + job.meta[\"format\"] + extension)",
"def getMediaPath(self, path):",
"def get_url_filename(url, headers=None, strip=[]):\n filename = get_url_disposition_filename(url, headers)\n if filename:\n return filename\n return get_url_straight_filename(url, strip=[])",
"def get_track_filename(self, url = None):\n track_file = urllib.urlopen(url)\n headers = track_file.info()\n track_file.close()\n return wget.filename_from_headers(headers)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
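The companion filename helper above can be exercised the same way, on the same illustrative URL.

print(get_media_filename("https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png"))
# -> file.png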
Takes a list of media urls and downloads the media into temporary storage | def download_media_from_bandwidth(media_urls):
downloaded_media_files = []
for media_url in media_urls:
media_id = get_media_id(media_url)
filename = get_media_filename(media_url)
with open(filename, "wb") as f:
try:
downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)
f.write(downloaded_media.body)
except Exception as e:
print(e)
downloaded_media_files.append(filename)
return downloaded_media_files | [
"def download_all_media():\n # download_path = '{}'.format(os.path.join(folder_name(conv_s()), \"media\"))\n # profile.set_preference(\"browser.download.dir\", download_path)\n open_media()\n download_media()\n while left_media():\n pass\n esc()\n esc()\n o(\"Finished downloading media\")",
"def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')",
"def download(urls, dest_folder):\n pass",
"def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()",
"def import_remote_media():\n run_export_media()\n run_download_media()\n import_media()",
"def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)",
"def download_media(track, chunks, fname):\n with open(fname, 'wb') as file:\n print(f'{track[\"initialization\"]}')\n init_seg = requests.get(track['initialization'])\n fsize = file.write(init_seg.content)\n seg = int(track['startNumber'])\n while chunks > 0:\n media_url = track[\"media\"].replace('$Number$', str(seg))\n print(f'{media_url}', end='\\r')\n media_seg = requests.get(media_url)\n fsize += file.write(media_seg.content)\n seg += 1\n chunks -= 1\n print(f'{media_url}')\n print(f'write {fsize} bytes')",
"def download_media(media, downloads_directory, post_shortcode, session):\n JinstaScrape.make_directory(downloads_directory)\n url = media['url']\n file_name = post_shortcode + '.' + url.split('/')[-1].split('?')[0]\n file_path = os.path.join(downloads_directory, file_name)\n is_video = True if 'mp4' in file_name else False\n\n if not os.path.isfile(file_path):\n with open(file_path, 'wb') as media_file:\n try:\n # Video\n if is_video:\n r = session.get(url, stream=True)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n media_file.write(chunk)\n # Image\n else:\n content = session.get(url).content\n media_file.write(content)\n \n # Update media for successful download\n media['downloaded_path'] = file_path\n media['downloaded_at'] = str(datetime.now())\n except requests.exceptions.ConnectionError:\n time.sleep(JinstaScrape.RETRY_COOLDOWN)\n # Video\n if is_video:\n r = session.get(url, stream=True)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n media_file.write(chunk)\n # Image\n else:\n content = session.get(url).content\n media_file.write(content)",
"def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)",
"def __download_scraped_media(self):\n # Terminate if manifest is empty\n if not self.manifest:\n print 'Nothing to download! Exiting program...'\n return\n\n download_start_time = datetime.now()\n future_to_download = {}\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=JinstaScrape.MAX_WORKERS)\n \n # Iterate each scraped post in manifest and add to concurrent download executor\n for shortcode in self.manifest:\n post = self.manifest[shortcode]\n # Iterate each media in post\n for media in post['media_items']:\n # If media not yet downloaded\n if not media['downloaded_path']:\n future = executor.submit(JinstaScrape.download_media, media, self.downloads_directory, shortcode, self.session)\n future_to_download[future] = media\n \n # Download each post in executor's list\n failure_count = 0\n for future in tqdm.tqdm(concurrent.futures.as_completed(future_to_download), total=len(future_to_download), desc='Downloading media'):\n media = future_to_download[future]\n if future.exception() is not None:\n print 'Media shortcode={0} at {1} generated an exception: {2}'.format(media['shortcode'], media['url'], future.exception())\n failure_count += 1\n\n # Upon completion\n print 'Download time elapsed: {}s'.format(JinstaScrape.time_elapsed(download_start_time))\n if failure_count:\n print failure_count, 'failed downloads'\n \n self.__writeout_manifest() # write out updated manifest",
"async def download_youtube_video_into_mp3(self, ctx, *, links : str):\n if not links.startswith(\"http\"):\n await self.bot.say(\"*`Please Start with a HTTP URL!`*\")\n return\n links = links.split(\" \")\n try:\n await self.bot.delete_message(ctx.message)\n except:\n pass\n if len(links) > 1 and (not links[1].startswith(\"-\") or len(links) != 2):\n embed = discord.Embed(title=\"Multiple Youtube Download... [{} Total]\".format(len([link for link in links])), description=\"*All done by Youtube-DL*\")\n embed_msg = await self.bot.say(embed=embed)\n before = datetime.datetime.now()\n x = 0\n fp = \"/media/seacow/Music/{}.zip\".format(\"Songs_for_\"+ctx.message.author.name.replace(\" \", \"_\"))\n zips = ZipFile(fp, \"w\")\n while x < len(links):\n dont_convert = False\n dont_upload = False\n if \"-noconvert\" in links[x]:\n del links[x]\n dont_convert = True\n x = x - 1\n elif \"-noupload\" in links[x]:\n del links[x]\n x = x - 1\n link = links[x]\n afterdl = await self.download_video_song(link, dont_convert = dont_convert, dont_upload = dont_upload, multi=True, embed=embed, embed_msg=embed_msg)\n z = afterdl[1]\n if z != None:\n zips.write(z, arcname=z.split(\"/\")[-1])\n x += 1\n zips.close()\n after = datetime.datetime.now()\n elapsed = after - before\n embed = afterdl[0]\n embed.add_field(name=\"Downloads Complete! Uploading...\", value=\"*Took {0.seconds} seconds*\".format(elapsed), inline=False)\n await self.bot.edit_message(embed_msg, embed=embed)\n await self.bot.upload(fp)\n os.remove(fp)\n embed.set_field_at(-1, name=\"Upload Complete!\", value=embed.fields[-1].value, inline=embed.fields[-1].inline)\n await self.bot.edit_message(embed_msg, embed=embed)\n else:\n links = \" \".join(links)\n dont_convert = False\n dont_upload = False\n if \"-noconvert\" in links:\n dont_convert = True\n elif \"-noupload\" in links:\n dont_upload = True\n link = links.split(\" \")[0]\n await self.download_video_song(link = link, dont_convert = dont_convert, dont_upload = dont_upload)",
"def sync_media(f_type):\n if f_type == 'photos':\n media = 'fr_{}.jpg'\n if f_type == 'videos':\n media = 'fs_{}.mov'\n\n for i in range(last_media + 1):\n media_name = media.format(i)\n download_media(media_name)",
"def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)",
"def run_yt_dl_multiple(session,download_urls,extractor_used,audio_id=None,video_id=None):\n assert(type(download_urls) is type([]))\n # Prevent duplicate downloads\n download_urls = uniquify(download_urls)\n # Download videos if there are any\n media_id_list = []\n for download_url in download_urls:\n media_id_list += run_yt_dl_single(\n session=session,\n download_url=download_url,\n extractor_used=extractor_used,\n audio_id=audio_id,\n video_id=video_id,\n )\n continue\n return media_id_list",
"def download_mp3s(url):\n def gen_urls(bandcamp_html):\n f = open(bandcamp_html)\n tracks = re.findall('(\\[.*?\\])', f.read())[0]\n tracks.replace('null', 'None').replace('false', 'False')\n tracks = eval(tracks) # WARNING!! VERY INSECURE\n\n for track in f:\n yield track['title'], track['file']['mp3-128']\n\n\n # dl page\n os.system('wget %s -O ~/tmp')\n os.system('grep \"trackinfo:\" ~/tmp > ~/tmp2')\n for title, url in gen_urls('~/tmp2'):",
"def startDownload(self,urllist): \n self.futureDownload = {self.tpe.submit(self.getImage,bucket,url):(line,zips) for url, bucket, line, zips in urllist}",
"def media_downloaded(self, response, request, info):",
"def download_list(urls, outdir=None, workdir=None, threads=3):\n pool = ThreadPool(threads)\n download_lambda = lambda x: download(x, outfile=outdir, workdir=workdir)\n pool.map(download_lambda, urls)",
"def download_files(file_uris):\n\n if os.path.exists(LOG_FILE):\n log_file = open(LOG_FILE, \"rU+\")\n downloaded_podcasts = strip_newlines(log_file)\n else:\n log_file = open(LOG_FILE,\"w\")\n downloaded_podcasts = []\n\n for uri in file_uris:\n # if the current file URI is not found in the log, it is a new file, and\n # is thus downloaded\n if uri not in downloaded_podcasts:\n # extract filename from the URI \n uri_split = re.split(\"/\", uri)\n filename = uri_split[len(uri_split) - 1]\n \n # download the file\n if OUTPUT:\n print \"downloading \" + uri\n urllib.urlretrieve(uri, DEST_DIR + os.sep + filename)\n log_file.write(uri + os.linesep)\n\n log_file.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
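A sketch of driving the download helper above; the URL is a placeholder, and the messaging_client and MESSAGING_ACCOUNT_ID globals are assumed to be configured as in the record.

# Assumes the globals referenced in the record are already set up.
media_urls = ["https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png"]  # placeholder
local_files = download_media_from_bandwidth(media_urls)
print(local_files)  # e.g. ['file.png'], written to the current working directory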
Takes a list of media files and uploads them to Bandwidth. The media file names are used as the media ids | def upload_media_to_bandwidth(media_files):
for filename in media_files:
with open(filename, "rb") as f:
file_content = f.read()
try:
##Note: The filename is doubling as the media id##
response = messaging_client.upload_media(MESSAGING_ACCOUNT_ID, filename, str(len(file_content)), body=file_content)
except Exception as e:
print(e) | [
"def _upload(self, files, voice_clip=False):\n file_dict = {\"upload_{}\".format(i): f for i, f in enumerate(files)}\n\n data = {\"voice_clip\": voice_clip}\n\n j = self._payload_post(\n \"https://upload.facebook.com/ajax/mercury/upload.php\", data, files=file_dict\n )\n\n if len(j[\"metadata\"]) != len(files):\n raise _exception.FBchatException(\n \"Some files could not be uploaded: {}, {}\".format(j, files)\n )\n\n return [\n (data[_util.mimetype_to_key(data[\"filetype\"])], data[\"filetype\"])\n for data in j[\"metadata\"]\n ]",
"def addFiles(self, file_list):\n \n # Add the files to the queue\n for file_name in file_list:\n self.file_queue.put(file_name)\n \n # Write the queue to disk\n self.saveQueue()\n \n # Upload the data\n self.uploadData()",
"def do_upload(self, args):\n if not self.config.require(['path', 'dest']):\n return\n\n files = os.listdir(self.config['path'])\n for filename in files:\n android.upload_audio(filename, self.config['path'], self.config['dest'])\n os.remove(os.path.join(self.config['path'], filename))",
"def get_add_media(self):\r\n return AnkiConnect.request(\r\n \"multi\",\r\n actions=[\r\n AnkiConnect.request(\r\n \"storeMediaFile\",\r\n filename=key,\r\n data=value\r\n )\r\n for key, value in MEDIA.items()\r\n ]\r\n )",
"def upload(self, folder_list, files):\n current_folder_id = self.top_folder_id\n for fname in folder_list:\n current_folder_id = self._fetch_or_create_folder(fname, current_folder_id)\n for file in files:\n self._upload_detail(file, current_folder_id)",
"def upload(self):\r\n self.inspectfiles()\r\n self.filereference()\r\n for path in self.multipart_urls.keys():\r\n for i in xrange( self.getPartcount(path) ):\r\n self.uploadpart(path,i)\r\n self.mergeparts(path)\r\n self.workerthread(path)",
"def upload_spectrals(spectrals_path, spectral_ids):\n if not spectral_ids:\n return None\n\n spectrals = []\n for sid, filename in spectral_ids.items():\n spectrals.append(\n (\n sid - 1,\n filename,\n (\n os.path.join(spectrals_path, f\"{sid:02d} Full.png\"),\n os.path.join(spectrals_path, f\"{sid:02d} Zoom.png\"),\n ),\n )\n )\n\n try:\n return upload_spectral_imgs(spectrals)\n except ImageUploadFailed as e:\n return click.secho(f\"Failed to upload spectral: {e}\", fg=\"red\")",
"def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files",
"def media_file_upload(request, manifest_id):\n manifest = get_object_or_404(Manifest, id=manifest_id)\n\n manifest_files = MediaFile.objects.filter(manifest=manifest)\n total_files_count = manifest_files.count()\n files_needing_upload = manifest_files.filter(file='')\n files_needing_upload_count = files_needing_upload.count()\n\n file_to_upload = files_needing_upload.first()\n\n # If no files left to upload, mark the manifest complete and move on\n if files_needing_upload_count < 1:\n Manifest.objects.filter(id=manifest.id).update(all_media_present=True)\n return HttpResponseRedirect(reverse('manifest-view', args=(manifest.id,)))\n\n form = MediaFileForm(request.POST or None, request.FILES or None, instance=file_to_upload)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('file-upload', args=(manifest.id,))) # Refresh view\n\n return render(request, 'file_manager/file_upload.html', {\n 'form': form,\n 'upload_number': total_files_count - files_needing_upload_count + 1, # Which place in order of upload e.g. 2 of 3\n 'total_files_count': manifest_files.count(),\n 'file_to_upload': file_to_upload,\n })",
"def ftp_files(file_list):\n # SET UP ftp TRANSFER\n host = 'enso.princeton.edu' # server address\n ftp = FTP(host)\n ftp.login()\n ftp.cwd('incoming/Caylor_smap/')\n for file in file_list:\n upload(ftp, file)\n return \"SMAP transfer successful\"",
"def upload_all(self,raise_on_error=False,**kwargs):\n jobs = []\n for src in self.register:\n job = self.upload_src(src, **kwargs)\n jobs.extend(job)\n return asyncio.gather(*jobs)",
"def bulk_upload(self, files):\n self.conn = self._connect()\n uploads = [self._upload_single_file(file) for file in files]\n logging.info(f'Finished uploading {len(uploads)} files to {self.remote_path} on {self.gitServer}')",
"def upload_waveforms(self) -> None:\n self._generator.upload_waveforms()",
"def upload_all(all_file_names):\n with ThreadPool(processes=int(10)) as pool:\n return pool.map(upload_file, all_file_names)",
"def sync_media(f_type):\n if f_type == 'photos':\n media = 'fr_{}.jpg'\n if f_type == 'videos':\n media = 'fs_{}.mov'\n\n for i in range(last_media + 1):\n media_name = media.format(i)\n download_media(media_name)",
"def process_file_list(file_list):\n upload_req_json = {\n \"checksum_algo\": \"md5\",\n \"files\": [],\n }\n print(\"Starting process!\")\n for file in file_list:\n file_dict = create_upload_req(file)\n upload_req_json['files'].append(file_dict)\n\n\n try:\n upload_resp = requests.post(url=CREATE_URL, json=upload_req_json, auth=('aspecscire', 'drone@123'))\n upload_resp.raise_for_status()\n upload_resp_json = upload_resp.json()\n except Exception as excpt:\n print(\"Request to CREATE URL failed!\")\n raise RuntimeError(excpt)\n\n files_to_upload = upload_resp_json.get(\"files\")\n if files_to_upload is None:\n raise RuntimeError(\"Files list empty in response from upload create req!\")\n\n columns = [\"name\", \"guid\", \"entity_key\"]\n data_list = []\n for file in files_to_upload:\n print(file['name'], end=\"\\r\")\n if file['error'].get(\"code\") == \"E_UPLOAD_CREATE_FILE_EXISTS\":\n data_list.append([file['name'], file['guid'], file['entity_key']])\n\n dataframe = pd.DataFrame(data_list, columns=columns)\n dataframe.to_csv(output_entity_csv)\n print(\"Write to CSV complete!\")",
"def upload_bulk_sms_file(batch_id, file_path):\n batch = Batch.objects.get(id=batch_id)\n batch.add_messages(read_messages_from_file(file_path))\n batch.status = Batch.PENDING\n batch.save()",
"def add_media(self, paths: Iterable[str]):\n if not paths:\n return\n\n # TODO: Let's just hope the commits and rollbacks always succeed for now...\n self._model.database().transaction()\n paths_added = []\n\n for path in paths:\n log.debug(f\"Adding media for {path}\")\n\n metadata = _parse_media(path)\n record = self._create_record(metadata)\n\n if not self._model.insertRecord(-1, record):\n log.error(f\"Failed to add media for {path}: {self._model.lastError()}\")\n # Assuming the model wasn't ever modified if this failed; no revert needed.\n else:\n paths_added.append(path)\n\n if not self._model.submitAll():\n log.error(f\"Failed to add media: could not submit changes.\")\n self._model.revertAll()\n self._model.database().rollback()\n\n return\n\n self._model.database().commit()\n\n # It's safer to get the last inserted ID right after committing as opposed to getting it\n # before inserting anything.\n last_id = self._model.query().lastInsertId()\n\n # Populate the playlist.\n for media_id, path in enumerate(paths_added, last_id - len(paths_added) + 1):\n media = QMediaContent(QUrl.fromLocalFile(path))\n self.playlist().addMedia(media, media_id)\n\n self.media_added.emit()",
"def upload_media(self,\r\n account_id,\r\n media_id,\r\n body,\r\n content_type='application/octet-stream',\r\n cache_control=None):\r\n\r\n # Prepare query URL\r\n _url_path = '/users/{accountId}/media/{mediaId}'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\r\n 'accountId': {'value': account_id, 'encode': False},\r\n 'mediaId': {'value': media_id, 'encode': False}\r\n })\r\n _query_builder = self.config.get_base_uri(Server.MESSAGINGDEFAULT)\r\n _query_builder += _url_path\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n if isinstance(body, FileWrapper):\r\n body_wrapper = body.file_stream\r\n body_content_type = body.content_type\r\n else:\r\n body_wrapper = body\r\n body_content_type = content_type\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'content-type': body_content_type,\r\n 'Cache-Control': cache_control\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.config.http_client.put(_query_url, headers=_headers, parameters=body_wrapper)\r\n MessagingBasicAuth.apply(self.config, _request)\r\n _response = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _response.status_code == 400:\r\n raise MessagingException('400 Request is malformed or invalid', _response)\r\n elif _response.status_code == 401:\r\n raise MessagingException('401 The specified user does not have access to the account', _response)\r\n elif _response.status_code == 403:\r\n raise MessagingException('403 The user does not have access to this API', _response)\r\n elif _response.status_code == 404:\r\n raise MessagingException('404 Path not found', _response)\r\n elif _response.status_code == 415:\r\n raise MessagingException('415 The content-type of the request is incorrect', _response)\r\n elif _response.status_code == 429:\r\n raise MessagingException('429 The rate limit has been reached', _response)\r\n self.validate_response(_response)\r\n\r\n # Return appropriate type\r\n return ApiResponse(_response)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
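The download and upload helpers are designed as a round trip: downloaded filenames double as media ids on re-upload, so re-hosted URLs can be rebuilt from them. A short sketch follows; BANDWIDTH_MEDIA_BASE_ENDPOINT and the other Bandwidth globals are assumed to be configured as in the surrounding records.

# Placeholder URL; assumes the same Bandwidth globals as the records above and below.
media_urls = ["https://messaging.bandwidth.com/api/v2/users/123/media/abc/0/file.png"]
files = download_media_from_bandwidth(media_urls)
upload_media_to_bandwidth(files)
rehosted = [BANDWIDTH_MEDIA_BASE_ENDPOINT + name for name in files]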
Removes all of the given files | def remove_files(files):
for file_name in files:
os.remove(file_name) | [
"def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)",
"def remove_files(self):\n for ff in self._all_paths:\n try:\n os.remove(ff)\n except OSError:\n pass",
"def delete_files(file_paths):\n for file_path in file_paths:\n remove(file_path)",
"def cleanup_files(self):\n\t\t#todo: this needs python 3.5 for the ** thing\n\t\t#todo: cleaned files may have had another expiry time set when created (probably acceptable though, it's just cache)\n\t\tfiles = iglob(join(self.file_dir, '**'), recursive=True)\n\t\tfor file in files:\n\t\t\tif isfile(file) and time() - getmtime(file) > self.file_expiration_time:\n\t\t\t\tremove(file)",
"def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()",
"def delete_files(files_list):\n for file_path in files_list:\n if os.path.isdir(file_path):\n shutil.rmtree(file_path)\n else:\n os.remove(file_path)",
"def delete_files_tmp():\n\n files = glob.glob('./tmp/*')\n for f in files:\n os.remove(f)",
"def remove_frames(tmpdir, files):\n for fname in files: os.remove(os.path.join(tmpdir, fname))\n if not(tmpdir == None): os.rmdir(tmpdir)",
"def clean_files(self):\n self.filenames.clear()",
"def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)",
"def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())",
"def deleteAllFiles(self, flush=True): \n \n if flush: \n self.flush(False) \n \n for filePath in self.filePathDict.keys(): \n if self.outDir is None: \n fullPath = filePath \n else: \n fullPath = os.path.join(self.outDir,filePath) \n \n if os.path.exists(fullPath): \n os.remove(fullPath)",
"def remove_files(self, files):\n # Sort for report-formatting, but also tries to delete parent dirs first.\n try:\n files.sort()\n except AttributeError:\n # None, or non-sortable object passed.\n return 0\n\n if self.debug:\n self.report_debug('Would\\'ve removed:')\n self.report_debug(' {}'.format('\\n '.join(files)))\n return 0\n\n cleanerrors = 0\n for filepath in files:\n # File may have been removed with a directory removal.\n if os.path.exists(filepath):\n if os.path.isdir(filepath):\n filetype = 'directory'\n removefunc = shutil.rmtree\n else:\n filetype = 'file'\n removefunc = os.remove\n try:\n removefunc(filepath)\n self.report('Removed {}: {}'.format(filetype, filepath))\n except Exception as ex:\n failfmt = 'Failed to remove: {}\\n {}'\n self.report(failfmt.format(filepath, ex))\n cleanerrors += 1\n return cleanerrors",
"def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)",
"def rmfiles(ctx, file, basedir, exitonfail): \n if not (type(file) == list or type(file) == tuple):\n raise Exception(\"Wrong input type should be a list: %s\" % type(file))\n \n result = True\n reason = None\n failed_once = False\n \n for file_name in file:\n with io.open(file_name, \"r\", encoding=\"utf8\") as f:\n for line in f:\n file_path = line.rstrip()\n if basedir:\n file_path = os.path.abspath(os.path.join(basedir, file_path))\n try:\n result, reason = rmfile(file_path)\n except OSError as excep:\n result = False\n reason = str(excep)\n if not result:\n click.echo(\"%s\\t%s\" % (file_path, reason))\n failed_once = True\n if exitonfail:\n sys.exit(1)\n\n if failed_once:\n sys.exit(1)",
"def delete_all_files_in_image():\n \n [os.remove(file) for file in glob.glob(os.path.join(os.getcwd(),\"src/static/images/\",\"*.png\"))]",
"def delete_files(pths):\n for f in pths:\n try:\n os.remove(f)\n except OSError:\n log.debug(\"Found and ignored Error when deleting file %s\" % f)\n pass\n log.debug(\"deleted %d files\" % len(pths))",
"def teardown():\n for filename in files_to_delete:\n delete_file(filename)",
"def _remove_files(self, dict_files):\n for k, v in dict_files.items():\n\n try:\n file_path = os.getcwd() + k.replace('/', os.sep)\n \n if (os.path.isdir(file_path)):\n# os.rmdir(file_path)\n shutil.rmtree(file_path)\n else:\n os.remove(file_path)\n\n except Exception:\n print('*** Error')\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
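remove_files assumes every path still exists and will raise if one is already gone; a slightly more defensive variant (an alternative sketch, not the record's code) could swallow that case.

import os

def remove_files_quietly(files):
    # Alternative sketch: ignore files that were already removed.
    for file_name in files:
        try:
            os.remove(file_name)
        except FileNotFoundError:
            pass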
Takes information from a Bandwidth inbound message callback that includes media and responds with a text message containing the same media sent through Bandwidth's media resource. | def handle_inbound_media_mms(to, from_, media):
downloaded_media_files = download_media_from_bandwidth(media)
upload_media_to_bandwidth(downloaded_media_files)
remove_files(downloaded_media_files)
body = MessageRequest()
body.application_id = MESSAGING_APPLICATION_ID
body.to = [from_]
body.mfrom = to
body.text = "Rebound!"
#Build the media URL by taking the media ids (that doubled as the file names) and appending them to
#the bandwidth media base url
body.media = [BANDWIDTH_MEDIA_BASE_ENDPOINT + media_file for media_file in downloaded_media_files]
try:
messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)
except Exception as e:
print(e)
return None | [
"def send_callback(context):\n publish_next_media_to_channel(context, chat_id=context.job.context)",
"def handle_inbound_message():\n data = json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sms_call_me(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n elif \"media\" in data[0][\"message\"]:\n handle_inbound_media_mms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"], data[0][\"message\"][\"media\"])\n else:\n handle_inbound_sms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n else:\n print(data)\n return \"\"",
"def send_handler(update, context):\n publish_next_media_to_channel(context, update.message.chat_id)",
"def handle_media( environ ):\n # TODO: implement me\n return 200, [], _html.format(\n title = 'MEDIA',\n head = '',\n body = 'MEDIA'\n )",
"def handle_message(parameters):\n message_sid = parameters[\"MessageSid\"]\n _from = parameters[\"From\"]\n to = parameters[\"To\"]\n body = parameters[\"Body\"]\n messaging_service_sid = parameters.get(\"MessagingServiceSid\",\"\")\n num_media = parameters.get(\"NumMedia\",None)\n error_tuple = (parameters.get(\"ErrorCode\",\"0\"),parameters.get(\"ErrorMessage\",\"\"))\n return message_handler(message_sid,_from,to,body,num_media,messaging_service_sid,error_tuple)",
"def incoming_sms():\n txt = request.form['Body']\n\n # remove leading and trailing white space and make lowercase\n txt = txt.strip()\n txt = txt.lower()\n\n # handle random searches differently than breed searches\n if txt == 'random' or txt == 'dog':\n url = get_dogs.get_random_dog()\n else:\n url = get_dogs.request_breed(txt)\n \n resp = MessagingResponse()\n if url:\n resp.message(url)\n else:\n resp.message(\"Sorry! We couldn't find a dog matching that query. Please try \\\n a more general search term.\")\n return str(resp)",
"def media_downloaded(self, response, request, info):",
"async def media(sessionid: str = Form(...),\n media_pk: int = Form(...),\n clients: ClientStorage = Depends(get_clients)) -> Dict:\n cl = clients.get(sessionid)\n return cl.insights_media(media_pk)",
"def handleSMS(self, callerID, message, node):",
"def upload_media_to_bandwidth(media_files):\n for filename in media_files:\n with open(filename, \"rb\") as f:\n file_content = f.read()\n try:\n ##Note: The filename is doubling as the media id##\n response = messaging_client.upload_media(MESSAGING_ACCOUNT_ID, filename, str(len(file_content)), body=file_content)\n except Exception as e:\n print(e)",
"def on_response_received(self, message):",
"def handle(self, message: Message) -> None:",
"def receive(self, data):\n try:\n xml = ElementTree.XML(data)\n except:\n raise StandardError(\"API request malformed\")\n\n mms = mobile.models.IncomingMMS.objects.create(\n id=xml.find('id').text,\n country=xml.find('country').text,\n sender=xml.find('senderNumber').text,\n recipient=xml.find('targetNumber').text,\n subject=xml.find('mms/subject').text,\n source=data\n )\n\n for item in xml.findall('mms/item'):\n if item.find('base64').text == 'true':\n data = b64decode(item.find('content').text)\n else:\n data = item.find('content').text\n\n mms_file = mobile.models.MMSFile(\n mms=mms\n )\n\n # Extract content type from MIME data\n matches = re.search('([^;]*/[^;]*);', item.find('mimeType').text)\n if matches:\n mms_file.content_type = matches.group(1)\n\n # Save file\n mms_file.file.save(\n name=item.find('name').text,\n content=ContentFile(data)\n )\n\n mms_file.save()\n\n return mms",
"def send_media_message(self,\n media_path: str,\n media_type: TelegramMediaType = TelegramMediaType.IMAGE,\n **kwargs) -> bool:\n chat_id = None\n if 'chat_id' in kwargs:\n chat_id = kwargs['chat_id']\n elif 'chat_title' in kwargs:\n with self._chat_id_map_lock:\n if kwargs['chat_title'] in self._chat_id_map:\n chat_id = self._chat_id_map[kwargs['chat_title']]\n\n if chat_id is not None:\n logging.debug(f\"Sending the next media message: {media_path} to chat id {chat_id}.\")\n caption_text = kwargs['caption'] if 'caption' in kwargs else ''\n content = TelegramWrapper._get_media_fie_content(media_path, media_type, caption_text)\n self._td_client_send({'@type': 'sendMessage', 'chat_id': chat_id, 'input_message_content': content})\n return True\n else:\n return False",
"def handle_subscription(client, userdata, message):\n payload = message.payload\n payload = payload.decode('utf-8')\n print(payload)\n # for now printing message, figure out more on payload and play with it.\n lps(\"Request received on topic: \" + str(message.topic) + \", with payload: \" + payload)\n blink_led(payload)",
"def callback_message(self, msg):\n # Messages sent privately to the bot should not be replayed !\n # We only want to replay messages that are coming from a thread or channel.\n if msg.is_direct:\n return\n\n # Get the data we need to send the callback webhook\n data = dict(\n source=self.bot_config.BACKEND,\n destination=os.getenv('TC_DESTINATION'),\n channel=os.getenv('TC_CHANNEL'),\n to=os.getenv('TC_TO'),\n author=msg.frm.nick,\n message=msg.body\n )\n endpoint = os.getenv('TC_DESTINATION_ENDPOINT')\n\n # Don't attempt to bridge messages that are empty or have failed to parse\n if data['author'] == '' or data['message'] == '':\n logging.info(\"Not sending bridge request: empty or failed to parse\")\n return\n\n logging.info(\"Sending bridge request\")\n r = requests.post(endpoint, data=json.dumps(data))",
"def on_media_state(self):\n try:\n if self.call.info().media_state == pj.MediaState.ACTIVE:\n if self.cfg['VoipIO']['debug']:\n self.system_logger.debug(\"CallCallback::on_media_state : Media is now active\")\n else:\n if self.cfg['VoipIO']['debug']:\n self.system_logger.debug(\"CallCallback::on_media_state : Media is inactive\")\n except:\n self.voipio.close_event.set()\n self.cfg['Logging']['system_logger'].exception('Uncaught exception in the CallCallback class.')\n raise",
"def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)",
"def reqmedia(self) :\n\t\ttry :\n\t\t\treturn self._reqmedia\n\t\texcept Exception as e:\n\t\t\traise e"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
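A sketch of the arguments the MMS handler above expects; the field names mirror how the inbound-message dispatcher later in this file reads the callback, while the phone numbers and URL are placeholders.

# Field layout follows handle_inbound_message further down; values are placeholders.
callback = [{
    "type": "message-received",
    "message": {
        "to": ["+15551112222"],
        "from": "+15553334444",
        "text": "",
        "media": ["https://messaging.bandwidth.com/api/v2/users/123/media/file.png"],
    },
}]
handle_inbound_media_mms(callback[0]["message"]["to"][0],
                         callback[0]["message"]["from"],
                         callback[0]["message"]["media"])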
Takes information from a Bandwidth inbound message callback and initiates a call | def handle_inbound_sms_call_me(to, from_):
handle_call_me(to, from_) | [
"def incoming_call(self, call):\n self.active_call = call\n self.ringer.play_ringer()",
"def _handleMessage(self):\r\n call = self._onBeforeCall()\r\n ## execute incoming RPC\r\n d = maybeDeferred(self._callProcedure, call)\r\n ## register callback and errback with extra argument call\r\n d.addCallbacks(self._onAfterCallSuccess,\r\n self._onAfterCallError,\r\n callbackArgs = (call,),\r\n errbackArgs = (call,))",
"def ProcessCallback(self, interface, info):\n pass",
"def whenReadReady(self, channel, call):",
"def onAfterSendCallSuccess(self, msg, call):\r\n pass",
"def handle_incoming(number):\n # Respond via text\n created = update_user(number, call_time=True)\n if created:\n msg = twilio_interface.Message(number, ONBOARDING_MSG)\n else:\n msg = twilio_interface.Message(number, CALL_RESPONSE)\n msg.send()\n\n # Call somebody\n recipients = get_recipient_list(number)\n for number in recipients:\n # TODO: Use callbacks to only update the call times of people who actually get called\n update_user(number, call_time=True)\n\n flavor = random.choice(FLAVOR)\n\n return twilio_interface.dial(recipients, flavor)",
"def on_incoming_call(self, call):\n\n try:\n current_time = time.time()\n remote_uri = hash_remote_uri(self.cfg, call.info().remote_uri)\n\n if not self.cfg['VoipIO']['reject_calls']:\n if self.voipio.black_list[get_user_from_uri(remote_uri)] < current_time:\n # answer the call\n self.voipio.call = call\n self.voipio.on_incoming_call(remote_uri)\n\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Incoming call from %s\" % remote_uri)\n\n call_cb = CallCallback(self.cfg, call, self.voipio)\n call.set_callback(call_cb)\n\n call.answer()\n else:\n # rejected the call since the caller is blacklisted\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Rejected call from blacklisted remote URI %s \" % remote_uri)\n wait_hours = (self.voipio.black_list[get_user_from_uri(remote_uri)] - current_time) / (60 * 60)\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Must wait for %d hours\" % wait_hours)\n # respond by \"Busy here\"\n call.answer(486)\n\n self.voipio.on_rejected_call_from_blacklisted_uri(remote_uri)\n else:\n # reject the call since all calls must be rejected\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(\"AccountCallback::on_incoming_call - Rejected call from %s\" % remote_uri)\n\n # respond by \"Busy here\"\n call.answer(486)\n # respond by \"Decline\"\n #call.answer(603)\n\n self.voipio.on_rejected_call(remote_uri)\n except:\n self.voipio.close_event.set()\n self.cfg['Logging']['system_logger'].exception('Uncaught exception in the AccountCallback class.')\n raise",
"def on_call_update(self, event):\n # if plivo_app != 'true', check b leg Dial callback\n plivo_app_flag = event['variable_plivo_app'] == 'true'\n if not plivo_app_flag:\n # request Dial callbackUrl if needed\n aleg_uuid = event['Bridged-To']\n if not aleg_uuid:\n return\n bleg_uuid = event['Unique-ID']\n if not bleg_uuid:\n return\n disposition = event['variable_endpoint_disposition']\n if disposition != 'ANSWER':\n return\n ck_url = event['variable_plivo_dial_callback_url']\n if not ck_url:\n return\n ck_method = event['variable_plivo_dial_callback_method']\n if not ck_method:\n return\n params = {'DialBLegUUID': bleg_uuid,\n 'DialALegUUID': aleg_uuid,\n 'DialBLegStatus': 'answer',\n 'CallUUID': aleg_uuid\n }\n # add extra params\n extra_params = self.get_extra_fs_vars(event)\n if extra_params:\n params.update(extra_params)\n spawn_raw(self.send_to_url, ck_url, params, ck_method)\n return",
"async def asterisk_init(request):\n\n try:\n phone = request.rel_url.query[\"phone\"]\n except KeyError:\n phone = None\n LOGGER.error(f\"No 'phone' parameter passed on: '{request.rel_url}'\")\n raise web.HTTPClientError(\n reason=ASTERISK_CALL_ERROR, body=None, text=None, content_type=None\n )\n try:\n message = request.rel_url.query[\"message\"]\n except KeyError:\n message = None\n LOGGER.error(f\"No 'message' parameter passed on: '{request.rel_url}'\")\n raise web.HTTPClientError(\n reason=ASTERISK_CALL_ERROR, body=None, text=None, content_type=None\n )\n\n # Prepare the URL to 'call' the Asterisk ARI\n asterisk_query_string = (\n f\"endpoint={ASTERISK_CHAN_TYPE}/{phone}&extension={ASTERISK_EXTENSION}\"\n + f\"&context={ASTERISK_CONTEXT}&callerId={ASTERISK_CALLERID}\"\n )\n asterisk_call_init = (\n f\"{ASTERISK_URL}/{ASTERISK_ARI_CHANNELS}?{asterisk_query_string}\"\n )\n # Place a call on the Asterisk system using HTTP Basic Auth on the PBX\n headers = await gen_headers(await gen_auth_string())\n\n try:\n session = ClientSession(timeout=CLIENT_TIMEOUT_TOTAL)\n call_resp = await session.post(\n url=asterisk_call_init, data=None, headers=headers\n )\n await session.close()\n if call_resp.status == 200:\n response_data = await call_resp.json()\n asterisk_chan = response_data[\"id\"]\n session = ClientSession(timeout=CLIENT_TIMEOUT_TOTAL)\n await session.post(\n url=CALL_REGISTER_URL\n + f\"/{CALL_REGISTER_APP_ROUTE_REGISTER_CALL}\"\n + f\"?phone={phone}&message={message}&asterisk_chan={asterisk_chan}\",\n data=None,\n headers=headers,\n )\n await session.close()\n else:\n LOGGER.error(\n f\"Asterisk server '{ASTERISK_URL}' response: {call_resp.status}. Unable to initialize the call.\"\n )\n\n except client_exceptions.ClientConnectorError as e:\n LOGGER.error(f\"Unable to connect to the Asterisk system: '{e}'\")\n raise web.HTTPClientError(\n reason=str(e), body=None, text=None, content_type=None\n )\n\n return web.json_response({\"status\": call_resp.status})",
"def main(msg: func.ServiceBusMessage):\r\n\r\n # Extract the method into a dictionary\r\n msg_dict = json.loads(msg.get_body().decode(\"utf-8\"))\r\n\r\n logging.info(f\"Python ServiceBus queue trigger processed message: {msg_dict}\")\r\n\r\n # Enable a connection with the IoT Hub. The connectionstring for the IoT Hub\r\n # is preloaded in the Azure Functions configurations.\r\n connectino_string_iothub = os.getenv(\"connectionStringIotHub\")\r\n registry_manager = IoTHubRegistryManager(connectino_string_iothub)\r\n\r\n # Settings for the method that the IoT Device should run upon receiving the message.\r\n callback_method = \"start_fan\"\r\n callback_payload = {}\r\n device_method = CloudToDeviceMethod(\r\n method_name=callback_method, payload=callback_payload\r\n )\r\n\r\n # Sending the actual cloud-to-device message and invoke a function on the IoT device.\r\n device_id = msg_dict[\"IoTHub\"][\"ConnectionDeviceId\"]\r\n response = registry_manager.invoke_device_method(device_id, device_method)\r\n\r\n print(\"\")\r\n print(\"Device Method called\")\r\n print(\"Device Method name : {0}\".format(callback_method))\r\n print(\"Device Method payload : {0}\".format(callback_payload))\r\n print(\"\")\r\n print(\"Response status : {0}\".format(response.status))\r\n print(\"Response payload : {0}\".format(response.payload))",
"def call(self, callee: \"SIPPhoneTemplate\") -> None:",
"def _initiate(self, call):\n if not self.gsm_call:\n raise Exception(\"No connectivity\")\n number = str(call.number)\n logger.info(\"initiate call to %s\", number)\n call_id = yield WaitDBus(self.gsm_call.Initiate, number, \"voice\")\n call_id = int(call_id)\n logger.info(\"call id : %d\", call_id)\n self.lines[call_id] = call\n # TODO: mabe not good idea to store this in the call itself,\n # beside, it makes pylint upset.\n call.__id = call_id",
"def polling_call(self) -> global___Snippet.ClientCall:",
"def subscribe(self, handler):",
"def place_call(self, number):\n call_params = urllib.urlencode({\n 'outgoingNumber' : number,\n 'forwardingNumber' : self.forwarding_number,\n 'subscriberNumber' : 'undefined',\n 'remember' : '0',\n 'phoneType' : self.phone_type,\n '_rnr_se': self.key\n })\n\n # Send the text, display status message \n self.response = self.opener.open(self.call_url, call_params).read()",
"def handle_call(self):\n call_socket, address = self.call_socket.accept()\n print(\"connected call socket: {}\".format(call_socket))\n # gets name of user making the call:\n caller_name = self.receive_mes(call_socket)\n # gets from calling client user they want to call:\n receiver_name = self.receive_mes(call_socket)\n # gets receivers socket from dictionary\n if receiver_name not in self.client_dict:\n print(\"boi bye\")\n sys.exit(EXIT)\n receiver_sock = self.client_dict[receiver_name]\n mes = \"{} is calling you\".format(caller_name)\n self.send_mes(mes.encode(), receiver_sock)\n answer = self.receive_mes(receiver_sock)\n print(\"answer from {}: {}\".format(receiver_name, answer))\n if answer == \"Y\":\n self.send_mes(\"call\".encode(), call_socket)\n self.start_call()\n else:\n self.send_mes(\"no call\".encode(), call_socket)",
"def get_listener(self, loc, handle_comm, deserialize, **connection_args):",
"def handle_call(parameters):\n call_sid = parameters[\"CallSid\"]\n _from = parameters[\"From\"]\n to = parameters[\"To\"]\n call_status = parameters[\"CallStatus\"]\n direction = parameters[\"Direction\"]\n forwarded_from = parameters.get(\"ForwardedFrom\",None)\n parent_call_sid = parameters.get(\"ParentCallSid\",None)\n digits = parameters.get(\"Digits\",None)\n error_tuple = (parameters.get(\"ErrorCode\",\"0\"),parameters.get(\"ErrorMessage\",\"\"))\n return call_handler(call_sid,_from,to,call_status,error_tuple,direction,forwarded_from,parent_call_sid,digits)",
"def call(self):\n for cb in self.callbacks:\n if len(cb[1]) > 0:\n cb[0](*cb[1])\n else:\n cb[0]()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A method for showing how to handle Bandwidth messaging callbacks. For inbound SMS that contains the phrase "call me", a phone call is made and the user is asked to forward the call to another number. For inbound SMS that doesn't contain the phrase "call me", the response is an SMS with the date and time. For inbound MMS with a media attachment, the response is the same media attachment sent through Bandwidth's media resource. For all other events, the callback is logged to the console | def handle_inbound_message():
data = json.loads(request.data)
if data[0]["type"] == "message-received":
if "call me" in data[0]["message"]["text"]:
handle_inbound_sms_call_me(data[0]["message"]["to"][0], data[0]["message"]["from"])
elif "media" in data[0]["message"]:
handle_inbound_media_mms(data[0]["message"]["to"][0], data[0]["message"]["from"], data[0]["message"]["media"])
else:
handle_inbound_sms(data[0]["message"]["to"][0], data[0]["message"]["from"])
else:
print(data)
return "" | [
"def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)",
"def handleSMS(self, callerID, message, node):",
"def handle_inbound_media_mms(to, from_, media):\n downloaded_media_files = download_media_from_bandwidth(media)\n upload_media_to_bandwidth(downloaded_media_files)\n remove_files(downloaded_media_files)\n body = MessageRequest()\n body.application_id = MESSAGING_APPLICATION_ID\n body.to = [from_]\n body.mfrom = to\n body.text = \"Rebound!\"\n #Build the media URL by taking the media ids (that doubled as the file names) and appending them to\n #the bandwidth media base url\n body.media = [BANDWIDTH_MEDIA_BASE_ENDPOINT + media_file for media_file in downloaded_media_files]\n try:\n messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)\n except Exception as e:\n print(e)\n return None",
"def incoming_sms():\n txt = request.form['Body']\n\n # remove leading and trailing white space and make lowercase\n txt = txt.strip()\n txt = txt.lower()\n\n # handle random searches differently than breed searches\n if txt == 'random' or txt == 'dog':\n url = get_dogs.get_random_dog()\n else:\n url = get_dogs.request_breed(txt)\n \n resp = MessagingResponse()\n if url:\n resp.message(url)\n else:\n resp.message(\"Sorry! We couldn't find a dog matching that query. Please try \\\n a more general search term.\")\n return str(resp)",
"def handle_incoming(number):\n # Respond via text\n created = update_user(number, call_time=True)\n if created:\n msg = twilio_interface.Message(number, ONBOARDING_MSG)\n else:\n msg = twilio_interface.Message(number, CALL_RESPONSE)\n msg.send()\n\n # Call somebody\n recipients = get_recipient_list(number)\n for number in recipients:\n # TODO: Use callbacks to only update the call times of people who actually get called\n update_user(number, call_time=True)\n\n flavor = random.choice(FLAVOR)\n\n return twilio_interface.dial(recipients, flavor)",
"def _handleMessage(self):\r\n call = self._onBeforeCall()\r\n ## execute incoming RPC\r\n d = maybeDeferred(self._callProcedure, call)\r\n ## register callback and errback with extra argument call\r\n d.addCallbacks(self._onAfterCallSuccess,\r\n self._onAfterCallError,\r\n callbackArgs = (call,),\r\n errbackArgs = (call,))",
"def handle_message(parameters):\n message_sid = parameters[\"MessageSid\"]\n _from = parameters[\"From\"]\n to = parameters[\"To\"]\n body = parameters[\"Body\"]\n messaging_service_sid = parameters.get(\"MessagingServiceSid\",\"\")\n num_media = parameters.get(\"NumMedia\",None)\n error_tuple = (parameters.get(\"ErrorCode\",\"0\"),parameters.get(\"ErrorMessage\",\"\"))\n return message_handler(message_sid,_from,to,body,num_media,messaging_service_sid,error_tuple)",
"def on_response_received(self, message):",
"def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if \"in_reply_to_status_id\" in data:\n status = Status.parse(None, data)\n return self.on_status(status)\n if \"delete\" in data:\n delete = data[\"delete\"][\"status\"]\n return self.on_delete(delete[\"id\"], delete[\"user_id\"])\n if \"disconnect\" in data:\n return self.on_disconnect_message(data[\"disconnect\"])\n if \"limit\" in data:\n return self.on_limit(data[\"limit\"][\"track\"])\n if \"scrub_geo\" in data:\n return self.on_scrub_geo(data[\"scrub_geo\"])\n if \"status_withheld\" in data:\n return self.on_status_withheld(data[\"status_withheld\"])\n if \"user_withheld\" in data:\n return self.on_user_withheld(data[\"user_withheld\"])\n if \"warning\" in data:\n return self.on_warning(data[\"warning\"])\n\n log.error(\"Received unknown message type: %s\", raw_data)",
"def _handle_custom_msg(self, content, buffers):\n self._msg_callbacks(self, content, buffers)",
"def sms_notification(request, source):\n\n # call provider factory based on slug\n source = source.lower()\n try:\n provider = get_service_provider(slug=source)\n except Exception, e:\n log.critical(e)\n raise Http404()\n\n # do a simple IP check\n ip = request.META['REMOTE_ADDR']\n\n if not provider.is_ip_allowed(ip):\n log.warn(\"Illegal call from %s\" % ip)\n raise Http404()\n\n log.info(\"Got request notification from %s\" % source)\n\n # extract message data\n try:\n msisdn, text, number = provider.get_primal_data(request.GET)\n log.debug(\"%s %s %s\" % (msisdn, text, number))\n except Exception, e:\n return HttpResponse(provider.handle_notification_error(e, request))\n\n log.debug(\"%s Request input: msisdn:%s, text:%s, number:%s\" % \\\n (source, msisdn, text, number))\n\n # collect purchase data, send success signal and say thanks to your\n # notification service\n la = provider.get_large_account(la_number = number, text = text)\n provider.dispatch_purchase(la = la, msisdn = msisdn, text = text)\n return HttpResponse(provider.NOTIFICATION_REPLY)",
"def master_callback(self, hermes, intent_message):\n coming_intent = intent_message.intent.intent_name\n if coming_intent == self.i18n.INTENT_FLIP_COIN:\n self.flip_coin_callback(hermes, intent_message)\n elif coming_intent == self.i18n.INTENT_RANDOM_DATE:\n self.random_date_callback(hermes, intent_message)\n elif coming_intent == self.i18n.INTENT_RANDOM_NUMBER:\n self.random_number_callback(hermes, intent_message)\n elif coming_intent == self.i18n.INTENT_ROLL_DICE:\n self.roll_dice_callback(hermes, intent_message)",
"def send_callback(context):\n publish_next_media_to_channel(context, chat_id=context.job.context)",
"def CallDtmfReceived(self, Call, Code):",
"def handle_send_messages():\n items = {k: v for k, v in subscribers.items() if v}\n for key in items:\n subscriber_obj = items[key]\n sim_id = get_sim_id(subscriber_obj)\n if sim_id and type(sim_id) is int:\n frame_messenger(subscriber_obj)\n elif sim_id and sim_id == \"live\":\n live_messenger(subscriber_obj)",
"def callback(request):\n \n logger.debug('Got callback from Link Mobile: %(raw_get_data)s\\n%(meta)s' % {\n 'raw_get_data': request.GET,\n 'meta': request.META,\n })\n \n try:\n ack = AckMessage()\n ack.batchid = request.GET.get('BatchID', '')\n ack.msisdn = request.GET.get('Msisdn', '')\n ack.price = request.GET.get('Price', '')\n ack.operator = request.GET.get('Operator', '')\n ack.messageid = request.GET.get('MessageID', '')\n ack.parts = request.GET.get('Parts', '')\n ack.statuscode = request.GET.get('StatusCode', '')\n ack.substatuscodes = request.GET.get('SubStatusCodes', '')\n ack.save()\n except:\n logger.exception('Could not save link mobile ACK message with BatchID %(batchid)s, MessageID %(msgid)s. Returning NOK.' % {\n 'batchid': request.GET.get('BatchID', ''),\n 'msgid': request.GET.get('MessageID', '')\n })\n return HttpResponse('NOK', content_type='text/plain')\n \n return HttpResponse('OK', content_type='text/plain')",
"def message_handler(payload):\n\tdevice_id = payload['deviceId']\n\tevent_name = payload['eventName']\n\tif event_name == 'EnteredDanger':\n\t\tprint \"{device_id} in Danger status\".format(device_id=device_id)\n\t\trelay.switch_on()\n\telif event_name == 'EnteredComfort':\n\t\tprint \"{device_id} in Comfort status\".format(device_id=device_id)\n\t\trelay.switch_off()\n\telse:\n\t\tprint \"{device_id} has generated an event of type {event_name} that is out of scope\".format(\n\t\t\tdevice_id=device_id,\n\t\t\tevent_name=event_name,\n\t\t)",
"def on_media_state(self):\n try:\n if self.call.info().media_state == pj.MediaState.ACTIVE:\n if self.cfg['VoipIO']['debug']:\n self.system_logger.debug(\"CallCallback::on_media_state : Media is now active\")\n else:\n if self.cfg['VoipIO']['debug']:\n self.system_logger.debug(\"CallCallback::on_media_state : Media is inactive\")\n except:\n self.voipio.close_event.set()\n self.cfg['Logging']['system_logger'].exception('Uncaught exception in the CallCallback class.')\n raise",
"def MessageHandlerMethod(**kwargs):\n data: dict = kwargs['data']\n bus: AbstractPikaBus = kwargs['bus']\n payload: dict = kwargs['payload']\n print(payload)\n if payload['reply']:\n payload['reply'] = False\n bus.Reply(payload=payload)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
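The entry above pairs a dispatch-style webhook with its docstring: the handler branches on the callback's "type" field and then on the message contents. A minimal, hedged sketch of the same dispatch pattern, assuming a bare Flask app, an illustrative route path, and print-based handlers (none of which come from the dataset row):

# Hypothetical sketch of the message-callback dispatch pattern shown above.
import json
from flask import Flask, request

app = Flask(__name__)

@app.route("/callbacks/messages", methods=["POST"])
def inbound_message_callback():
    # Bandwidth-style callbacks arrive as a JSON list of event objects.
    events = json.loads(request.data)
    for event in events:
        if event.get("type") == "message-received":
            message = event["message"]
            print("from %s: %s" % (message.get("from"), message.get("text")))
        else:
            # Log everything else (delivery receipts, errors, ...).
            print(event)
    return ""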
Formats |record| with color. | def format(self, record):
msg = super(ColoredFormatter, self).format(record)
color = self._COLOR_MAPPING.get(record.levelname)
if self._use_colors and color:
msg = '%s%s%s' % (color, msg, self._RESET)
return msg | [
"def format(self, record):\n\t\tif self.color:\n\t\t\ttry:\n\t\t\t\tcat = getattr(record, self.CATEGORY, None)\n\t\t\t\tif not cat:\n\t\t\t\t\tif record.levelname == 'WARN': cat = LOG_WARN\n\t\t\t\t\telif record.levelname == 'ERROR': cat = LOG_ERROR\n\t\t\t\t\telif record.levelname == 'DEBUG': cat = LOG_DEBUG\n\t\t\t\tif cat:\n\t\t\t\t\tcat = cat.lower()\n\t\t\t\t\trecord = copy.copy(record)\n\t\t\t\t\tindexes = getattr(record, self.ARG_INDEX, None)\n\t\t\t\t\tif indexes == None:\n\t\t\t\t\t\trecord.msg = self.colorCategoryToEscapeSequence(cat)+record.msg+self.colorCategoryToEscapeSequence(LOG_END)\n\t\t\t\t\telse:\n\t\t\t\t\t\targs = list(record.args)\n\t\t\t\t\t\tfor index in indexes: args[index] = self.formatArg(cat, args[index])\n\t\t\t\t\t\trecord.args = tuple(args)\n\t\t\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.getLogger('pysys.utils.logutils').debug('Failed to format log message \"%s\": %s'%(record.msg, repr(e)))\n\n\t\treturn super(ColorLogFormatter, self).format(record)",
"def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s",
"def format(self, record) -> object:\n record.message = record.getMessage()\n level = record.levelname\n\n if self._use_color and level in self.COLORS:\n msg_color = self.COLOR_SEQ % (self.COLORS[level]) + record.message + self.RESET_SEQ\n record.message = msg_color\n\n if self.usesTime():\n record.asctime = self.formatTime(record, self.datefmt)\n\n s = self.formatMessage(record)\n\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s += \"\\n\"\n s = s + record.exc_text\n\n if record.stack_info:\n if s[-1:] != \"\\n\":\n s += \"\\n\"\n s = s + self.formatStack(record.stack_info)\n\n return s",
"def colorize_traceback(self, formatter, record):\n\t\tif record.exc_info:\n\t\t\t# Cache the traceback text to avoid converting it multiple times\n\t\t\t# (it's constant anyway)\n\t\t\trecord.exc_text = \"\".join([\n\t\t\t\tself.get_color(\"red\"),\n\t\t\t\tformatter.formatException(record.exc_info),\n\t\t\t\tself.reset,\n\t\t\t])",
"def format(self, record):\n mappings = {\n 'asctime': create_timestamp,\n 'message': lambda r: r.msg,\n }\n\n formatters = self.parse()\n\n log_record = {}\n for formatter in formatters:\n try:\n log_record[formatter] = mappings[formatter](record)\n except KeyError:\n log_record[formatter] = record.__dict__[formatter]\n\n return json.dumps(log_record)",
"def format(self, record):\n extra = {\n \"message\": record.getMessage(),\n \"time\": self.formatTime(record, self.datefmt),\n \"msecs\": record.msecs,\n \"name\": record.name,\n \"level\": record.levelname,\n }\n\n keys = filter(self.filterer, record.__dict__)\n extra.update({k: record.__dict__[k] for k in keys})\n return str(CustomEncoder().encode(extra))",
"def format(self, record):\r\n # Decode the message to support non-ascii characters\r\n # We must choose the charset manually\r\n for record_charset in 'UTF-8', 'US-ASCII', 'ISO-8859-1':\r\n try:\r\n record.message = record.message.decode(record_charset)\r\n self.encoding = record_charset\r\n except UnicodeError:\r\n pass\r\n else:\r\n break\r\n \r\n if self.formatter is None:\r\n return record.message\r\n return self.formatter(record, self)",
"def format(self, record):\n json_dict = dict(msg=record.msg, level=record.levelname)\n formatted_message = ' ' + json.dumps(json_dict)\n\n if self._is_first_line:\n self._is_first_line = False\n return formatted_message\n\n return ', ' + formatted_message",
"def format(self, record):\n # Add our custon fields first.\n if(not hasattr(record, 'hostname')):\n record.hostname = socket.gethostname()\n if(not hasattr(record, 'datetime')):\n record.datetime = datetime.datetime.fromtimestamp(record.created)\n if(record.exc_info):\n df_fmtr = logging._defaultFormatter\n record.exception = df_fmtr.formatException(record.exc_info)\n else:\n record.exception = None\n\n # Now call super.format() to make sure everything is kosher.\n super(DatabaseHandler, self).format(record)\n\n # Then massage the standsrd ones.\n if(len(record.asctime) >= 4 and record.asctime[-4] == ','):\n record.asctime = record.asctime[:-4] + '.' + record.asctime[-3:]\n\n # Turn every string into unicode.\n for attr in ('asctime',\n 'hostname',\n 'filename',\n 'funcName',\n 'levelname',\n 'levelno',\n 'module',\n 'message',\n 'name',\n 'pathname',\n 'processName',\n 'threadName',\n 'exception'):\n value = getattr(record, attr)\n if(value is not None and isinstance(value, str)):\n setattr(record, attr, unicode(value))\n return",
"def emit(self, record):\n try:\n message = self.format(record)\n\n if not self.is_colorized:\n self.stream.write(message)\n else:\n style = self._get_style_function_for_level(record.levelno)\n self.stream.write(style(message))\n\n self.stream.write(getattr(self, 'terminator', '\\n'))\n self.flush()\n\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)",
"def format(self, record):\n record.msg = json.dumps(record.msg)[1:-1]\n return super().format(record)",
"def format(self, record, *args, **kwargs):\r\n return logging.Formatter.format(\r\n self, record, *args, **kwargs).replace('\\n', '\\n' + ' ' * 8)",
"def cformat(self, *args, **kwargs):\n kwargs = kwargs.copy()\n kwargs.setdefault(\"color\", None)\n return self.format(*args, **kwargs)",
"def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s",
"def format(self, record):\n return {\n 'timestamp': int(float(record.created or time.time()) * 1000),\n 'message': record.msg or '',\n 'level': record.levelname or 'DEBUG'\n }",
"def record_format(self, record_format):\n self._record_format = record_format",
"def emit(self, record):\n # Need to make a actual copy of the record\n # to prevent altering the message for other loggers\n myrecord = copy.copy(record)\n levelno = myrecord.levelno\n if levelno >= 50: # CRITICAL / FATAL\n front = '\\033[30;41m' # black/red\n elif levelno >= 40: # ERROR\n front = '\\033[30;41m' # black/red\n elif levelno >= 30: # WARNING\n front = '\\033[30;43m' # black/yellow\n elif levelno >= 20: # INFO\n front = '\\033[30;42m' # black/green\n elif levelno >= 10: # DEBUG\n front = '\\033[30;46m' # black/cyan\n else: # NOTSET and anything else\n front = '\\033[0m' # normal\n\n myrecord.levelname = '%s%s\\033[0m' % (front, myrecord.levelname)\n logging.StreamHandler.emit(self, myrecord)",
"def get_formatted_record(self, record_format: str = None) -> str:\n if record_format:\n return record_format.format_map(defaultdict(str, **self.dict_values))\n raise RecordFormatError(\"Format string must be set\")",
"def format(self, record):\n record.message = record.getMessage()\n if self.usesTime():\n record.asctime = self.formatTime(record, self.datefmt)\n try:\n s = self._fmt % record.__dict__\n except UnicodeDecodeError as e:\n # Issue 25664. The logger name may be Unicode. Try again ...\n try:\n record.name = record.name.decode('utf-8')\n s = self._fmt % record.__dict__\n except UnicodeDecodeError:\n raise e\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n try:\n s = s + record.exc_text\n except UnicodeError:\n # Sometimes filenames have non-ASCII chars, which can lead\n # to errors when s is Unicode and record.exc_text is str\n # See issue 8924.\n # We also use replace for when there are multiple\n # encodings, e.g. UTF-8 for the filesystem and latin-1\n # for a script. See issue 13232.\n s = s + record.exc_text.decode(sys.path.getfilesystemencoding(),\n 'replace')\n if re.findall(r\"u'\\\\u\", s):\n s = s.encode('utf-8').decode('unicode_escape')\n\n return s"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
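The entry above colorizes log records by level inside format(). A small stdlib-only sketch of how such a formatter is typically wired onto a logger; the color table, constructor, and format string here are illustrative assumptions, only the format() body mirrors the document:

# Illustrative stdlib-only sketch of a level-colored formatter (assumed names).
import logging

class ColoredFormatter(logging.Formatter):
    _RESET = '\033[0m'
    _COLOR_MAPPING = {
        'WARNING': '\033[33m',  # yellow
        'ERROR': '\033[31m',    # red
    }

    def __init__(self, *args, use_colors=True, **kwargs):
        super().__init__(*args, **kwargs)
        self._use_colors = use_colors

    def format(self, record):
        msg = super().format(record)
        color = self._COLOR_MAPPING.get(record.levelname)
        if self._use_colors and color:
            msg = '%s%s%s' % (color, msg, self._RESET)
        return msg

handler = logging.StreamHandler()
handler.setFormatter(ColoredFormatter('%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
logging.getLogger().warning('colored when the terminal supports ANSI codes')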
Always symlink |path| to a relativized |target|. | def symlink(target, path):
unlink(path)
path = os.path.realpath(path)
target = os.path.relpath(os.path.realpath(target), os.path.dirname(path))
logging.info('Symlinking %s -> %s', path, target)
os.symlink(target, path) | [
"def symlink(self, path, target, *args, **kwargs): # pragma: no cover",
"def symlink_to(self, target, target_is_directory=False):\n if self._closed:\n self._raise_closed()\n self._accessor.symlink(target, self, target_is_directory)",
"def symlink(source, link_name, target_is_directory=False, dir_fd=None):\n pass",
"def attempt_symlink_to(path: str, to_path: str) -> None:\n try:\n Path(path).symlink_to(Path(to_path))\n except OSError:\n pytest.skip(\"could not create symbolic link\")",
"def symlink_target(pth):\n\n if os.path.islink(pth):\n return os.readlink(pth)\n return pth",
"def link(target, link):\n assert isinstance(target, str)\n assert os.path.exists(target)\n assert isinstance(link, str)\n\n # Create the path to the link if it does not exists\n abs_path = os.path.dirname(os.path.abspath(link))\n if not os.path.isdir(abs_path):\n os.makedirs(abs_path)\n\n # Make sure the file or folder recursively has the good mode\n chmod(target)\n\n # Create the link to target\n os.symlink(target, link)",
"def link_file(source, target):\n try:\n os.symlink(source, target)\n except AttributeError:\n try:\n os.link(source, target)\n except AttributeError:\n copy_file(source, target)",
"def symlink(self, newlink):\n os.symlink(self, newlink)",
"def link_to(self, path_prefix):\n dest_link_path = get_destination_path(\n source=self.path(),\n dest=path_prefix,\n extension=self.file_extension())\n if not os.path.exists(dest_link_path):\n parent, _ = os.path.split(path_prefix)\n if parent and not os.path.exists(parent):\n os.makedirs(parent)\n try:\n os.link(self._path, dest_link_path)\n except os.error:\n relative_src_path = os.path.join(\n os.path.relpath(\n os.path.dirname(self._path),\n os.path.dirname(dest_link_path)),\n os.path.basename(self._path))\n os.symlink(relative_src_path, dest_link_path)",
"def _makelink(self, name, target):\n name = os.path.join(self.working_dir, name)\n target = os.path.join(self.working_dir, target)\n if not os.path.isdir(os.path.dirname(name)):\n os.makedirs(os.path.dirname(name))\n os.symlink(target,name)",
"def _create_link_(l, newpath=None):\n _l = pathlib.Path(l)\n # log.debug(\"make_links:_l:{}, newpath: {}\".format(_l, newpath))\n if _l.is_symlink():\n pass\n else:\n if _l.is_dir():\n if newpath is None:\n newpath = _l.joinpath(_l.parent, '.'+_l.name+'-'+timestamp.ts())\n # log.debug(\"oldpath: {} and newpath: {}\".format(_l, newpath))\n # log.debug(\"os.path.basename(newpath): {}\".format(os.path.basename(newpath)))\n _l.rename(newpath)\n _l.symlink_to(os.path.basename(newpath))\n else:\n # log.debug(\"oldpath: {} and newpath: {}\".format(_l, newpath))\n _l.symlink_to(newpath)",
"def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)",
"def link(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n full_destination_path = os.path.join(\n os.path.expandvars(self.path_destination), self.name\n )\n\n try:\n if self.sudo:\n spawn.process(\n f'ln -sfv \"{full_source_path}\" \"{full_destination_path}\"',\n sudo=True,\n )\n else:\n os.symlink(full_source_path, full_destination_path)\n except FileExistsError:\n message.error(\n \"Can't symlink, file already exists at destination. Attempting fix.\"\n )\n os.remove(full_destination_path)\n message.info(f\"Removed: '{full_destination_path}'\")\n os.symlink(full_source_path, full_destination_path)\n finally:\n message.info(\n f\"Symlink created: '{full_source_path}' <--> '{full_destination_path}'\"\n )\n else:\n message.error(\n f\"'{self.name}' has no source from which to create a link from.\"\n )",
"def hardlink(self, newpath):\n os.link(unicode(self), unicode(newpath))",
"def relative_link(src, dst):\n src_dir = os.path.dirname(src)\n rel_path = os.path.relpath(dst, src_dir)\n os.symlink(rel_path, src)",
"def symlink(self, dst):\r\n raise NotImplementedError()",
"def _link_destination(self, path):\n path = os.path.expanduser(path)\n path = os.readlink(path)\n if sys.platform[:5] == \"win32\" and path.startswith(\"\\\\\\\\?\\\\\"):\n path = path[4:]\n return path",
"def create_symlink(self, source_path, dest_path):\n raise NotImplementedError()",
"def _symlink(source, link_name):\n flags = 0\n\n if source is not None and os.path.isdir(source):\n flags = 1\n\n CreateSymbolicLinkW(link_name, source, flags)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
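The entry above relativizes the target before creating the symlink so the link survives tree moves. A self-contained sketch of the same idea, with an assumed helper name and a simplified cleanup of any pre-existing link:

# Sketch: point |path| at |target| via a relative symlink (assumed helper name).
import logging
import os

def relative_symlink(target, path):
    # Drop any stale link or file first so os.symlink() does not fail with EEXIST.
    if os.path.lexists(path):
        os.unlink(path)
    path = os.path.realpath(path)
    rel_target = os.path.relpath(os.path.realpath(target), os.path.dirname(path))
    logging.info('Symlinking %s -> %s', path, rel_target)
    os.symlink(rel_target, path)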
Return sha256 hex digest of |path|. | def sha256(path: Union[Path, str]) -> str:
# The file shouldn't be too big to load into memory, so be lazy.
with open(path, 'rb') as fp:
data = fp.read()
m = hashlib.sha256()
m.update(data)
return m.hexdigest() | [
"def hex_hash(path):\n\n return hashlib.md5(path).hexdigest()[:2]",
"def hexhash(path):\n\n m = hashlib.md5()\n with open(path) as handle:\n for line in handle:\n m.update(line)\n return m.hexdigest()",
"def _checksum_sha256(file_path):\n sha256_hash = hashlib.sha256()\n chunk_size = 4096\n with open(file_path, \"rb\") as f:\n # Read and update hash string value in blocks of 4K\n while True:\n buffer = f.read(chunk_size)\n if not buffer:\n break\n sha256_hash.update(buffer)\n return sha256_hash.hexdigest()",
"def get_path_hash(root_path):\n return hashlib.sha224(root_path.encode('ascii')).hexdigest()",
"def _get_file_sha256_hash(file_path):\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(file_path, \"rb\") as f:\n while True:\n buffer = f.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()",
"def sha256sum(file_path):\n from hashlib import sha256\n hash = sha256()\n with open(file_path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hash.update(chunk)\n return hash.hexdigest()",
"def hash_file(path):\n hasher = hashlib.sha256()\n with open(path, 'rb') as f:\n buffer = f.read(BLOCK_SIZE)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = f.read(BLOCK_SIZE)\n return base64.urlsafe_b64encode(hasher.digest()[:12]).decode('utf-8')",
"def checksum(path):\n with open(path, 'r') as f:\n return md5(f.read()).digest()",
"def get_file_content_hash(file_path):\n with open(file_path) as content:\n hasher = hashlib.sha256()\n hasher.update(content.read())\n return hasher.hexdigest()",
"def file_digest(path, algo=hashlib.md5):\n checksum = algo()\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n checksum.update(chunk)\n return checksum.hexdigest()",
"def calculate_file_hash(fpath: pathlib.Path) -> str:\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(fpath, \"rb\") as f:\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(BLOCKSIZE)\n\n return hasher.hexdigest()",
"def hash_file_sha256(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha256, binary=binary, buffer_size=buffer_size)",
"def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()",
"def hash_file(path, digest=None):\r\n digest = digest or hashlib.sha1()\r\n with open(path, 'rb') as fd:\r\n s = fd.read(8192)\r\n while s:\r\n digest.update(s)\r\n s = fd.read(8192)\r\n return digest.hexdigest()",
"def hash_file(pathname):\n h = hashlib.sha256()\n with open(pathname, 'rb') as ifile:\n h.update(ifile.read())\n return h.digest()",
"def file_checksum(path):\n\n with abort_if_file_changes_during_read(path):\n m = hash_implementation()\n\n with open(path, 'rb') as f:\n for chunk in read_in_chunks(f, io.DEFAULT_BUFFER_SIZE):\n m.update(chunk)\n\n return m.hexdigest()",
"def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()",
"def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)",
"def symlink_hash(path):\n hasher = sha1()\n data = path_to_bytes(os.readlink(path))\n hasher.update(('blob %u\\0' % len(data)).encode('ascii'))\n hasher.update(data)\n return hasher"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
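The sha256 document above deliberately reads the whole file into memory. For large inputs, a chunked variant keeps memory flat; the helper name and chunk size below are assumptions:

# Sketch: streaming variant of the sha256 helper above.
import hashlib
from pathlib import Path
from typing import Union

def sha256_streamed(path: Union[Path, str], chunk_size: int = 1024 * 1024) -> str:
    m = hashlib.sha256()
    with open(path, 'rb') as fp:
        # Read fixed-size chunks until EOF instead of slurping the whole file.
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            m.update(chunk)
    return m.hexdigest()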
Unpack |archive| into |cwd|. | def unpack(archive: Union[Path, str],
cwd: Optional[Path] = None,
files: Optional[List[Union[Path, str]]] = ()):
archive = Path(archive)
if cwd is None:
cwd = Path.cwd()
if files:
files = ['--'] + list(files)
else:
files = []
# Try to make symlink usage easier in Windows.
extra_env = {
'MSYS': 'winsymlinks:nativestrict',
}
logging.info('Unpacking %s', archive.name)
# We use relpath here to help out tar on platforms where it doesn't like
    # paths with colons in them (e.g. Windows). We have to construct the full path
# before running through relpath as relative archives will implicitly be
# checked against os.getcwd rather than the explicit cwd.
src = os.path.relpath(cwd / archive, cwd)
run(['tar', '--no-same-owner', '-xf', src] + files, cwd=cwd,
extra_env=extra_env) | [
"def unpack_archive(fname: Union[str, Path], tgt_dir: Union[str, Path]) -> None:\n shutil.unpack_archive(str(fname), str(tgt_dir))",
"def _unzip(archive, dst):\n with zipfile.ZipFile(archive) as zf:\n for zi in zf.infolist():\n if zi.filename[-1] == '/': # skip dir\n continue\n zi.filename = os.path.basename(zi.filename)\n zf.extract(zi, dst)",
"def unpack(self, tarball=None, location=None, use_vhost=True):",
"def unpack_dir(indir, outdir, bands=None, clouds=None):\r\n archives = glob.glob(indir + '*.tar.gz')\r\n count = len(archives)\r\n for idx, archive in enumerate(archives):\r\n # Determine the outpath directory name for the unpacked landsat archive\r\n unpackDir = outdir + os.path.splitext(os.path.split(\r\n os.path.splitext(archive)[0])[1])[0]\r\n\r\n # Check if the directory already exists and make it if it doesn't\r\n if not os.path.exists(unpackDir):\r\n os.makedirs(unpackDir)\r\n\r\n # Unpack the current archive.\r\n unpack_landsat(archive, unpackDir, bands=bands,clouds=clouds)\r\n\r\n # Let the user know how progress is going.\r\n print(archive + ' unpacked (' + str(idx + 1) + ' of ' + str(count) + ')')",
"def unpack_kit_archive(kit_archive_path: str, dest_root_dir: str) -> str:\n meta_dict = get_metadata_from_archive(kit_archive_path)\n\n destdir = os.path.join(\n dest_root_dir,\n 'kit-{}'.format(\n format_kit_descriptor(meta_dict['name'],\n meta_dict['version'],\n meta_dict['iteration'])\n )\n )\n\n if not os.path.exists(destdir):\n os.mkdir(destdir)\n\n logger.debug(\n '[utils.parse()] Unpacking [%s] into [%s]' % (\n kit_archive_path, destdir))\n\n #\n # Extract the file\n #\n cmd = 'tar --extract --bzip2 --strip-components 1 --file {} -C {}'.format(\n kit_archive_path, destdir)\n TortugaSubprocess(cmd).run()\n\n #\n # Remove world write permissions, if any\n #\n cmd = 'chmod -R a-w {}'.format(destdir)\n TortugaSubprocess(cmd).run()\n\n logger.debug(\n '[utils.parse()] Unpacked [%s] into [%s]' % (\n kit_archive_path, destdir))\n\n return destdir",
"def unpack_archive(self, archive_name):\n archive = zipfile.ZipFile(\n os.path.join(\n self.current_path,\n os.path.split(self.exe_file)[0],\n archive_name\n )\n )\n\n self.extraction_path = os.getcwd()\n\n archive.extractall(self.extraction_path)\n\n self.rename_main_script()\n\n archive_pyc_files = []\n\n for path, dirs, files in os.walk(self.extraction_path):\n for f in files:\n archive_pyc_files.append(os.path.join(path, f))\n\n return archive_pyc_files",
"def extract_zip(archive, outdir=None):\n zip = ZipFile(archive, \"r\")\n if outdir is None:\n outdir = os.getcwd()\n zip.extractall(outdir)\n zip.close()",
"def unpack(c):\n c.run(\"tar -xzf raw_data.tar.gz --directory data\")",
"def unpack(backend_name, archive_id):\n backend = get_backend(backend_name)\n click.echo(f\"Retrieving archive {archive_id}\")\n backend.archive_retrieve(config.root_path, archive_id)",
"def _unpack_archive(self, dir, filters):\n ext = os.path.splitext(self.path)[1]\n if ext in [\".zip\", \".xpi\"]:\n if filters:\n raise GbpError(\"Can only filter tar archives: %s\", (ext, self.path))\n self._unpack_zip(dir)\n else:\n self._unpack_tar(dir, filters)",
"def restore_dir(self) -> None:\n # ensure archive file exists and specified directory does not\n assert self.archive_file_path.exists()\n assert not self.path.exists()\n # ensure correct directory structure exists in home directory\n self.path.parent.mkdir(parents=True, exist_ok=True)\n # generate command to extract archive into parent of specified directory\n if platform_os == \"Linux\":\n command = \"7z x -so -p\" + \"\\\"\" + archive_password + \"\\\"\" + \" \" + \"\\\"\" + str(self.archive_file_path) + \"\\\"\"\n command += \" | tar x -C \" + \"\\\"\" + str(self.path.parent) + \"\\\"\"\n elif platform_os == \"Windows\":\n command = \"7z x -o\" + \"\\\"\" + str(self.path.parent) + \"\\\"\" + \" -p\"\n command += \"\\\"\" + archive_password + \"\\\"\" + \" \"\n command += \"\\\"\" + str(self.archive_file_path) + \"\\\"\"\n else:\n raise Exception(\"Platform not recognised!\")\n os.system(command)",
"def unpack():\n clean_remote()\n if exists(env.code_pack_location):\n with cd(env.dev_root):\n sudo('tar xf {0}'.format(env.code_pack_location))",
"def extract_one(self, archive: Path, dest: Path):\n if dest.exists():\n shutil.rmtree(dest)\n\n dest.mkdir(parents=True)\n\n if self.should_use_libarchive_c:\n import libarchive\n\n old_cwd = os.getcwd()\n os.chdir(str(dest))\n try:\n libarchive.extract_file(str(archive))\n finally:\n os.chdir(old_cwd)\n return\n\n if archive.name.endswith(EXTENSION_ZIP):\n with zipfile.ZipFile(archive) as zf:\n zf.extractall(dest)\n elif archive.name.endswith(EXTENSION_TAR):\n mode = \"r:bz2\" if archive.name.endswith(\".bz2\") else \"r:gz\"\n with tarfile.open(archive, mode) as tf:\n self.safe_extract_all(tf, dest)\n else:\n raise ValueError(f\"Unrecognized archive format {archive.name}\")\n\n for path in [dest, *dest.rglob(\"*\")]:\n path.chmod(MOD_DIRECTORY if path.is_dir() else MOD_FILE)",
"def unpack_archive(\n filepath: types.PathLike, *, extract_dir: Optional[types.PathLike] = None\n) -> types.PathLike:\n filepath = utils.to_path(filepath).resolve()\n if not extract_dir:\n extract_dir = str(filepath.parent)\n filepath = str(filepath)\n os.makedirs(extract_dir, exist_ok=True)\n is_zipfile = zipfile.is_zipfile(filepath)\n is_tarfile = tarfile.is_tarfile(filepath)\n if not is_zipfile and not is_tarfile:\n LOGGER.debug(\"'%s' is not an archive\", filepath)\n return extract_dir\n else:\n LOGGER.info(\"extracting data from archive file '%s'\", filepath)\n shutil.unpack_archive(filepath, extract_dir=extract_dir, format=None)\n # we want to rename the unpacked directory to a consistent value\n # unfortunately, shutil doesn't pass this back to us\n # so, we get the root path of all the constituent members\n if is_zipfile:\n with zipfile.ZipFile(filepath, mode=\"r\") as zf:\n members = zf.namelist()\n else:\n with tarfile.open(filepath, mode=\"r\") as tf:\n members = tf.getnames()\n src_basename = os.path.commonpath(members)\n dest_basename = os.path.basename(filepath)\n if src_basename:\n while True:\n tmp, _ = os.path.splitext(dest_basename)\n if tmp == dest_basename:\n break\n else:\n dest_basename = tmp\n if src_basename != dest_basename:\n return shutil.move(\n os.path.join(extract_dir, src_basename),\n os.path.join(extract_dir, dest_basename),\n )\n else:\n return os.path.join(extract_dir, src_basename)\n else:\n return extract_dir",
"def untar(tarball):\n import tarfile\n from time import strftime\n wp_extract_dir = '/tmp/' + strftime(\"%d-%m-%Y\") + '-wp/'\n wp_extracted = wp_extract_dir + 'wordpress/'\n if not path.exists(wp_extracted):\n print('Uncompressing files...')\n wp_tarfile = tarfile.open(tarball)\n wp_tarfile.extractall(wp_extract_dir)\n wp_tarfile.close()\n print('Extraction complete.')\n return wp_extracted # Return path to extracted files",
"def untar(archive):\n log.info('Unpacking archive \"%s\".' % archive)\n tar = module.params['tar']\n tar_extra_options = shlex.split(module.params['tar_extra_options'])\n if not tar:\n tar = module.get_bin_path('tar', required=True)\n if archive.endswith('.gz'):\n uncompress = 'z'\n elif archive.endswith('.bz2'):\n uncompress = 'j'\n else:\n raise ValueError('Unsupported compression type: %s' % archive)\n options = ''.join(['x', uncompress, 'f'])\n args = [tar, options] + tar_extra_options + [archive]\n rc, out, err = module.run_command(args)\n log.info('untar: rc=%d out=%s err=%s', rc, out, err)\n if rc != 0:\n raise ValueError('tar command failed: %d' % rc)",
"def unpack(input_filename, extract_dir):\n if not is_archive_file(input_filename):\n raise AttributeError(\"Input_filename must be an archive (ex: .tar.gz, .zip)\")\n if zipfile.is_zipfile(input_filename):\n unzip(input_filename, extract_dir)\n else:\n untar(input_filename, extract_dir)",
"def extract(archive, item, directory):\n \n d_path = path.join(directory, path.basename(item))\n s_path = get_path(archive, item)\n src = archive.open(s_path, 'r')\n dst = open(d_path, 'wb')\n copyfileobj(src, dst)\n src.close()\n dst.close()\n return d_path",
"def unzip_and_untar(item):\n print(\"Unpacking %s\" % item)\n\n f = tarfile.open(item, mode=\"r\")\n f.extractall(path=\"working\")\n f.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
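The unpack document above shells out to tar (with the MSYS symlink workaround for Windows). A stdlib-only sketch of the same operation via tarfile, skipping that workaround and the per-file filter arguments (an assumed simplification, not the document's method):

# Sketch: stdlib-only unpack of an archive into |cwd|.
import tarfile
from pathlib import Path
from typing import Optional, Union

def unpack_with_tarfile(archive: Union[Path, str], cwd: Optional[Path] = None):
    cwd = Path.cwd() if cwd is None else Path(cwd)
    # tarfile auto-detects gzip/bzip2/xz compression when opened for reading.
    with tarfile.open(archive) as tf:
        # Note: extractall() trusts the archive; add member filtering for
        # untrusted inputs.
        tf.extractall(path=cwd)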
Create an |archive| with |paths| in |cwd|. The output will use XZ compression. | def pack(archive: Union[Path, str],
paths: List[Union[Path, str]],
cwd: Optional[Path] = None,
exclude: Optional[List[Union[Path, str]]] = ()):
archive = Path(archive)
if cwd is None:
cwd = Path.cwd()
if archive.suffix == '.xz':
archive = archive.with_suffix('')
# Make sure all the paths have sane permissions.
def walk(path):
if path.is_symlink():
return
elif path.is_dir():
# All dirs should be 755.
mode = path.stat().st_mode & 0o777
if mode != 0o755:
path.chmod(0o755)
for subpath in path.glob('*'):
walk(subpath)
elif path.is_file():
# All scripts should be 755 while other files should be 644.
mode = path.stat().st_mode & 0o777
if mode in (0o755, 0o644):
return
if mode & 0o111:
path.chmod(0o755)
else:
path.chmod(0o644)
else:
raise ValueError(f'{path}: unknown file type')
logging.info('Forcing sane permissions on inputs')
for path in paths:
walk(cwd / path)
logging.info('Creating %s tarball', archive.name)
# We use relpath here to help out tar on platforms where it doesn't like
    # paths with colons in them (e.g. Windows). We have to construct the full path
# before running through relpath as relative archives will implicitly be
# checked against os.getcwd rather than the explicit cwd.
tar = os.path.relpath(cwd / archive, cwd)
run(['tar', '--owner=0', '--group=0', '-cf', tar] +
[f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd)
logging.info('Compressing tarball')
run(['xz', '-f', '-T0', '-9', tar], cwd=cwd) | [
"def make_archive():\n Utils.delete_if_exist(Path.get_zip_path(True))\n shutil.make_archive(Path.get_zip_path(), 'zip', Utils.reverse_path_if_windows(Path.get_dist_path()))",
"def generate_archive_file(location, paths, environment=None, compression=None, archive_format=None):\n if archive_format == 'zip':\n archive = ZipTarWrapper(location.name, 'w', zipfile.ZIP_DEFLATED)\n else:\n write_type = \"w\"\n if compression:\n write_type = \"w|{0}\".format(compression)\n archive = tarfile.open(location.name, write_type)\n\n # Add all the things to the archive\n for path_spec in paths:\n path_spec.add_to_tar(archive, environment)\n\n # Finish the zip\n archive.close()\n\n return archive",
"def zip_package(paths: List[Path], fp, compression=zipfile.ZIP_DEFLATED):\n\n with zipfile.ZipFile(\n file=fp, mode=\"w\", compression=compression, compresslevel=9\n ) as z:\n for path in paths:\n (local_path, zip_path) = path\n z.write(filename=str(path[0]), arcname=str(path[1]))",
"def create_zip_file(archive_path, files):\n with zipfile.ZipFile(archive_path, \"w\") as zip_file:\n for file, content in files:\n zip_file.writestr(file, content)",
"def create_archives(self):\n # Cria pasta onde arquivos serão armazenados\n self._create_data_folder()\n # cria os aquivos de saida esperados\n OUT_ARCHIVE_COMMENTS, OUT_ARCHIVE_MEDIAS, OUT_ARCHIVE_MEDIAS_PERIODIC, OUT_ARCHIVE_PROFILES_PERIODIC = self._create_output_paths()\n\n print(self._now_str(), \"Creating archives at:\",\n \"data/archives/{}\".format(self.TIME))\n\n self._aggregate_comments(OUT_ARCHIVE_COMMENTS)",
"def create_archive(cls, directory_path: str, output_path: str) -> str:\n pass",
"def make_zip(archive, rootdir=None, basedir=None, mode=\"w\"):\n cwd = os.getcwd()\n if rootdir is not None:\n os.chdir(rootdir)\n try:\n if basedir is None:\n basedir = os.curdir\n log(\"Creating %s with %s ...\" % (archive, basedir))\n zip = ZipFile(archive, mode, compression=ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(basedir):\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zip.write(path, path)\n log(\"Adding %s\" % path)\n zip.close()\n finally:\n if rootdir is not None:\n os.chdir(cwd)",
"def Zip(args):\n parser = argparse.ArgumentParser(description=Zip.__doc__)\n parser.add_argument(\n '-r', dest='recursive', action='store_true',\n default=False,\n help='recurse into directories')\n parser.add_argument(\n '-q', dest='quiet', action='store_true',\n default=False,\n help='quiet operation')\n parser.add_argument('zipfile')\n parser.add_argument('filenames', nargs='+')\n options = parser.parse_args(args)\n\n src_files = []\n for filename in options.filenames:\n globbed_src_args = glob.glob(filename)\n if not globbed_src_args:\n if not options.quiet:\n print('zip warning: name not matched: %s' % filename)\n\n for src_file in globbed_src_args:\n src_file = os.path.normpath(src_file)\n src_files.append(src_file)\n if options.recursive and os.path.isdir(src_file):\n for root, dirs, files in os.walk(src_file):\n for dirname in dirs:\n src_files.append(os.path.join(root, dirname))\n for filename in files:\n src_files.append(os.path.join(root, filename))\n\n # zip_data represents a list of the data to be written or appended to the\n # zip_stream. It is a list of tuples:\n # (OS file path, zip path/zip file info, and file data)\n # In all cases one of the |os path| or the |file data| will be None.\n # |os path| is None when there is no OS file to write to the archive (i.e.\n # the file data already existed in the archive). |file data| is None when the\n # file is new (never existed in the archive) or being updated.\n zip_data = []\n new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]\n zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])\n for i in range(len(src_files)))\n write_mode = 'a'\n if os.path.exists(options.zipfile):\n with zipfile.ZipFile(options.zipfile, 'r') as zip_stream:\n try:\n files_to_update = set(new_files_to_add).intersection(\n set(zip_stream.namelist()))\n if files_to_update:\n # As far as I can tell, there is no way to update a zip entry using\n # zipfile; the best you can do is rewrite the archive.\n # Iterate through the zipfile to maintain file order.\n write_mode = 'w'\n for zip_path in zip_stream.namelist():\n if zip_path in files_to_update:\n os_path = zip_path_to_os_path_dict[zip_path]\n zip_data.append((os_path, zip_path, None))\n new_files_to_add.remove(zip_path)\n else:\n file_bytes = zip_stream.read(zip_path)\n file_info = zip_stream.getinfo(zip_path)\n zip_data.append((None, file_info, file_bytes))\n except IOError:\n pass\n\n for zip_path in new_files_to_add:\n zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))\n\n if not zip_data:\n print('zip error: Nothing to do! (%s)' % options.zipfile)\n return 1\n\n with zipfile.ZipFile(options.zipfile, write_mode,\n zipfile.ZIP_DEFLATED) as zip_stream:\n for os_path, file_info_or_zip_path, file_bytes in zip_data:\n if isinstance(file_info_or_zip_path, zipfile.ZipInfo):\n zip_path = file_info_or_zip_path.filename\n else:\n zip_path = file_info_or_zip_path\n\n if os_path:\n st = os.stat(os_path)\n if stat.S_ISDIR(st.st_mode):\n # Python 2.6 on the buildbots doesn't support writing directories to\n # zip files. This was resolved in a later version of Python 2.6.\n # We'll work around it by writing an empty file with the correct\n # path. 
(This is basically what later versions do anyway.)\n zip_info = zipfile.ZipInfo()\n zip_info.filename = zip_path\n zip_info.date_time = time.localtime(st.st_mtime)[0:6]\n zip_info.compress_type = zip_stream.compression\n zip_info.flag_bits = 0x00\n zip_info.external_attr = (st[0] & 0xFFFF) << 16\n zip_info.CRC = 0\n zip_info.compress_size = 0\n zip_info.file_size = 0\n zip_stream.writestr(zip_info, '')\n else:\n zip_stream.write(os_path, zip_path)\n else:\n zip_stream.writestr(file_info_or_zip_path, file_bytes)\n\n if not options.quiet:\n if zip_path in new_files_to_add:\n operation = 'adding'\n else:\n operation = 'updating'\n zip_info = zip_stream.getinfo(zip_path)\n if (zip_info.compress_type == zipfile.ZIP_STORED or\n zip_info.file_size == 0):\n print(' %s: %s (stored 0%%)' % (operation, zip_path))\n elif zip_info.compress_type == zipfile.ZIP_DEFLATED:\n print(' %s: %s (deflated %d%%)' % (operation, zip_path,\n 100 - zip_info.compress_size * 100 / zip_info.file_size))\n\n return 0",
"def create_archive(filelist):\n\t\n\n\ttmp = tempfile.NamedTemporaryFile()\n\t# with tempfile.SpooledTemporaryFile() as tmp:\n\twith zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as archive:\n\t\tarcname = './docs/'\n\t\tfor x in filelist:\n\t\t\tfilename = os.path.basename(x[1])\n\t\t\t_file = x[0]\n\t\t\t# make sure we're at the start...\n\t\t\t_file.seek(0)\n\t\t\tarchive.write(_file.name, arcname=os.path.join(arcname, filename))\n\n\t# Reset file pointer\n\ttmp.seek(0)\n\n\treturn tmp\n\n\t\t# Write file data to response\n\t\t# return HttpResponse(tmp.read(), content_type='application/x-zip-compressed')",
"def _makeArchive(fileList, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in fileList:\n zipf.write(f, os.path.relpath(f, root))",
"def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))",
"def archiveNewFiles(self):\n\n #Archive any diag files\n if self.checkForDiagFiles():\n self._archiveDiags()\n\n #Archive checkpoint files\n if self.checkForMaeFiles(self.chk_prefix, self.chkdir, 2):\n self._archiveMaeFiles(self.chk_prefix, self.chkdir, 2)\n \n #Archive plot files\n if self.checkForMaeFiles(self.plt_prefix, self.pltdir, 1):\n self._archiveMaeFiles(self.plt_prefix, self.pltdir, 1)",
"def make_archive(output_filename, archive_format, source_dir):\n if os.path.splitext(output_filename)[1] == \"\":\n output_filename += \".zip\"\n output_dir = os.path.dirname(output_filename)\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n relroot = source_dir\n with zipfile.ZipFile(output_filename, \"w\", zipfile.ZIP_DEFLATED) as zip_f:\n for root, dirs, files in os.walk(source_dir):\n # add directory (needed for empty dirs)\n zip_f.write(root, os.path.relpath(root, relroot))\n for file in files:\n filename = os.path.join(root, file)\n if os.path.isfile(filename): # regular files only\n arcname = os.path.join(os.path.relpath(root, relroot), file)\n zip_f.write(filename, arcname)\n\n return output_filename",
"def _create_zip(self, files):\n temp = StringIO()\n archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED)\n for file in files:\n archive.writestr(filename, file.read())\n file.close()\n archive.close()\n return temp",
"def create_zip(self):\n self._create_cmd()\n if os.path.exists(self.main_name):\n shutil.rmtree(self.main_name)\n\n if os.path.exists(self.main_name + \".zip\"):\n os.remove(self.main_name + \".zip\")\n\n names = self._find_names()\n names = list(itertools.chain.from_iterable(names))\n # print(names)\n if self.main_file not in names:\n raise FileNotFoundError(\"the main_file \\\"{}\\\" not not found in glob list\".format(self.main_file))\n\n if self.compile_to_pyc:\n names = self.compile(names)\n\n with zipfile.ZipFile(self.main_name + \".zip\", \"w\") as zip_ref:\n for file in names:\n zip_ref.write(file, self.main_name + \"-src\\\\\" + file)\n if self.compile_to_pyc:\n fn, ex = os.path.splitext(file)\n if ex == \".pyc\":\n os.remove(file)\n\n zip_ref.write(self.cmd_file)\n\n os.remove(self.cmd_file)\n if self.nquiet:\n print(\"done create zip from files and folders:\", \",\".join(names))",
"def atomic_dump(paths):\n with contextlib.ExitStack() as stack:\n # Create the temporary files. They will all be closed at the end of the\n # `with` block.\n tmp_paths = tuple(\n stack.enter_context(tempfile.NamedTemporaryFile()).name # pylint: disable=no-member\n for _ in paths\n )\n # Write to the temporary files.\n yield tmp_paths\n # Overwrite the old files with the new ones.\n for (path, tmp_path) in zip(paths, tmp_paths):\n shutil.move(tmp_path, path)\n # Create an empty file to avoid a FileNotFoundError on exit.\n open(tmp_path, 'w').close()",
"def create_tar(paths):\n paths = to_list(paths)\n try:\n temp_tar_file = tempfile.NamedTemporaryFile(suffix=\".tar\",\n delete=False)\n with tarfile.open(temp_tar_file.name, \"w\") as tar_file:\n for path in paths:\n full_path = os.path.abspath(path)\n if os.path.isfile(full_path):\n arcname = os.path.basename(full_path)\n tar_file.add(full_path, arcname=arcname)\n elif os.path.isdir(full_path):\n # If we pass a directory, flatten it out.\n # List its contents, and add them as they are.\n for element in os.listdir(full_path):\n arcname = element\n tar_file.add(os.path.join(full_path, element),\n arcname=arcname)\n return temp_tar_file.name\n except tarfile.TarError:\n raise CommandError(\"Error creating the temporary tar archive.\")",
"def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)",
"def buildArchive(zipfileName, filelist, tilePrefixStr, verbose):\n zf = zipfile.ZipFile(zipfileName, 'w')\n subdirName = os.path.basename(tilePrefixStr)\n prefixLen = len(tilePrefixStr)\n if verbose:\n print(\"Downloading\", len(filelist), \"files\")\n for filename in filelist:\n basename = os.path.basename(filename)\n (fd, tmpName) = tempfile.mkstemp(prefix=basename, dir='.')\n os.close(fd)\n \n if verbose:\n print(\"File\", basename)\n cmdWords = [\"gsutil\", \"cp\", filename, tmpName]\n proc = subprocess.Popen(cmdWords, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (stdout, stderr) = proc.communicate()\n \n if os.path.exists(tmpName):\n archiveName = \"{}{}\".format(subdirName, filename[prefixLen:])\n zf.write(tmpName, archiveName)\n os.remove(tmpName)\n zf.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
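The pack document above builds the tarball with tar and compresses it with xz -9 -T0 after normalizing permissions. A compact tarfile-based sketch of the same XZ-compressed output, without the permission walk or exclude handling, and using the default compression preset (illustrative only, not the document's method):

# Sketch: create an XZ-compressed tarball of |paths| rooted at |cwd|.
import tarfile
from pathlib import Path
from typing import List, Optional, Union

def pack_with_tarfile(archive: Union[Path, str],
                      paths: List[Union[Path, str]],
                      cwd: Optional[Path] = None):
    cwd = Path.cwd() if cwd is None else Path(cwd)
    # 'w:xz' writes LZMA/XZ-compressed output directly.
    with tarfile.open(archive, 'w:xz') as tf:
        for path in paths:
            # arcname keeps members relative to |cwd| instead of absolute.
            tf.add(str(cwd / path), arcname=str(path))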
Fetch |uri| and write the results to |output| (or return BytesIO). | def fetch_data(uri: str, output=None, verbose: bool = False, b64: bool = False):
# This is the timeout used on each blocking operation, not the entire
# life of the connection. So it's used for initial urlopen and for each
# read attempt (which may be partial reads). 5 minutes should be fine.
TIMEOUT = 5 * 60
if output is None:
output = io.BytesIO()
try:
with urllib.request.urlopen(uri, timeout=TIMEOUT) as infp:
mb = 0
length = infp.length
while True:
data = infp.read(1024 * 1024)
if not data:
break
# Show a simple progress bar if the user is interactive.
if verbose:
mb += 1
print('~%i MiB downloaded' % (mb,), end='')
if length:
percent = mb * 1024 * 1024 * 100 / length
print(' (%.2f%%)' % (percent,), end='')
print('\r', end='', flush=True)
if b64:
data = base64.b64decode(data)
output.write(data)
except urllib.error.HTTPError as e:
logging.error('%s: %s', uri, e)
sys.exit(1)
return output | [
"def _fetch_file(self, location, output=None):\n\n self.log.debug(\"Fetching '%s' file...\" % location)\n\n if not output:\n output = tempfile.mktemp(\"-dogen\")\n\n self.log.debug(\"Fetched file will be saved as '%s'...\" % os.path.basename(output))\n\n r = requests.get(location, verify=self.ssl_verify, stream=True)\n\n if r.status_code != 200:\n raise Error(\"Could not download file from %s\" % location)\n\n with open(output, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n f.write(chunk)\n\n return output",
"def fetch_file(self, location, output=None):\n\n self.log.debug(\"Fetching '%s' file...\" % location)\n\n if not output:\n output = tempfile.mktemp(\"-dogen\")\n \n self.log.debug(\"File will be saved as '%s'...\" % output)\n\n with open(output, 'wb') as f:\n f.write(requests.get(location, verify=self.ssl_verify).content)\n\n return output",
"def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)",
"def fetch(url, outq):\n print(\"fetching\", url)\n data = ''\n with eventlet.Timeout(5, False):\n data = urllib2.urlopen(url).read()\n for url_match in url_regex.finditer(data):\n new_url = url_match.group(0)\n outq.put(new_url)",
"def fetch(url, outq):\r\n print(\"fetching\", url)\r\n data = ''\r\n with eventlet.Timeout(5, False):\r\n data = urllib2.urlopen(url).read()\r\n for url_match in url_regex.finditer(data):\r\n new_url = url_match.group(0)\r\n outq.put(new_url)",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def _download_file(url: str, output_path: str):\n\n def write_to_file(response: requests.Response, output_path: str) -> int:\n \"\"\"Write the response content to the given file.\n\n :param response: Response to be written to the output file.\n :param output_path: Path to the output file.\n :returns: Number of bytes read from the response content.\n \"\"\"\n read_bytes = 0\n with open(output_path, \"wb\") as output_file:\n # Use the same chunk size of `urlretrieve`\n for chunk in response.iter_content(chunk_size=1024 * 8):\n read_bytes += len(chunk)\n output_file.write(chunk)\n if read_bytes > FETCHER_MAXIMUM_FILE_SIZE:\n break\n return read_bytes\n\n try:\n with requests.get(\n url, stream=True, timeout=FETCHER_REQUEST_TIMEOUT\n ) as response:\n response.raise_for_status()\n\n content_length = int(response.headers.get(\"Content-Length\", 0))\n if content_length > FETCHER_MAXIMUM_FILE_SIZE:\n raise REANAFetcherError(\"Maximum file size exceeded\")\n\n read_bytes = write_to_file(response, output_path)\n\n if read_bytes > FETCHER_MAXIMUM_FILE_SIZE:\n os.remove(output_path)\n raise REANAFetcherError(\"Maximum file size exceeded\")\n except HTTPError as e:\n error = f\"Cannot fetch the workflow specification: {e.response.reason} ({response.status_code})\"\n if response.status_code == 404:\n error = \"Cannot find the given workflow specification\"\n raise REANAFetcherError(error)\n except Timeout:\n raise REANAFetcherError(\n \"Timed-out while fetching the workflow specification\"\n )\n except RequestException:\n raise REANAFetcherError(\n \"Something went wrong while fetching the workflow specification\"\n )",
"def __fetch_output_task(\n self, task, download_dir, overwrite, changed_only, **extra_args):\n return task.fetch_output(\n download_dir, overwrite, changed_only, **extra_args)",
"def fetch(url, data=None):\n \n conn = urllib2.urlopen(url, data=data)\n try:\n return conn.read()\n finally:\n conn.close()",
"def fetch(self) -> bytes:\n self.log.debug(f\"fetching package: {self.file_name}\")\n desc = self.format_desc(self.file_name)\n content = utils.stream_download(self.source_url, desc=desc)\n return content",
"def url_fetch(self, url):\n user_agent = random.choice(self.conf.user_agents)\n if self.isCompress == True:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n else:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n raw_data = ''\n try:\n conn = httplib.HTTPConnection(self.proxy, timeout=3.0)\n conn.request('GET', url, None, headers)\n response = conn.getresponse()\n raw_data = response.read()\n except Exception as err:\n self.logger.error('connect error[%s]' % err)\n return '999', 'Request failed', ''\n finally:\n conn.close()\n \n content = ''\n if self.isCompress == True:\n if response.status == 200:\n try:\n stream = StringIO.StringIO(raw_data)\n decompressor = gzip.GzipFile(fileobj=stream)\n content = decompressor.read()\n except:\n self.logger.error('status[%s] len_raw_data[%d]' % (response.status, len(raw_data)))\n return '998', 'content err', ''\n else:\n if response.status == 200:\n content = raw_data \n\n return response.status, response.reason, content",
"def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)",
"def fetch_image(url: str) -> BytesIO:\n http = urllib3.PoolManager()\n return BytesIO(http.request(\"GET\", url).data)",
"def _download_file(url):\n return requests.get(url, stream=True)",
"def _fetch(self, url):\n now = time.time()\n diff = now - self._last_fetch\n print \"now=%f, then=%f, diff=%f vs. %f\" % (now, self._last_fetch, diff, self._wait_time)\n if diff < self._wait_time:\n time.sleep(self._wait_time - diff)\n self._last_fetch = time.time()\n\n print \"Fetching %s\" % url\n f = urllib.URLopener().open(url)\n return f.read()",
"def _FetchLog(http_client, log_url, output_file, callback):\r\n def _OnFetch(response):\r\n if response.code == 200:\r\n with open(output_file, 'w') as f:\r\n f.write(response.body)\r\n logging.info('wrote %d bytes to %s' % (len(response.body), output_file))\r\n else:\r\n logging.error('failed to fetch %s' % log_url)\r\n callback()\r\n\r\n http_client.fetch(log_url, callback=_OnFetch, method='GET')",
"async def fetch(url):\n r = requests.get(url)\n return r",
"def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content",
"def download(self, destination=None):\n url = self.imageinfo['url']\n if destination is not None:\n res = self.site.connection.get(url, stream=True)\n for chunk in res.iter_content(1024):\n destination.write(chunk)\n else:\n return self.site.connection.get(url).content"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
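The fetch_data document above wraps a streaming read loop with a progress bar, optional base64 decoding, and HTTP error handling. Stripped of those extras, the core loop looks like the sketch below; the function name, chunk size, and timeout are assumptions:

# Sketch: minimal streaming fetch into a BytesIO (stdlib only).
import io
import urllib.request

def fetch_bytes(uri: str, chunk_size: int = 1024 * 1024) -> io.BytesIO:
    output = io.BytesIO()
    # The timeout applies to each blocking read, not the whole transfer.
    with urllib.request.urlopen(uri, timeout=300) as infp:
        while True:
            data = infp.read(chunk_size)
            if not data:
                break
            output.write(data)
    return output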
Download our copies of node & npm to our tree and update env ($PATH). | def node_and_npm_setup():
# We have to update modules first as it'll nuke the dir node lives under.
node.modules_update()
node.update() | [
"def InstallNodeDependencies():\n logging.info('entering ...')\n # Install the project dependencies specified in package.json into\n # node_modules.\n logging.info('installing AMP Validator engine dependencies ...')\n subprocess.check_call(\n ['npm', 'install', '--userconfig', '../.npmrc'],\n stdout=(open(os.devnull, 'wb') if os.environ.get('CI') else sys.stdout))\n logging.info('installing AMP Validator nodejs dependencies ...')\n subprocess.check_call(['npm', 'install', '--userconfig', '../../../.npmrc'],\n cwd='js/nodejs',\n stdout=(open(os.devnull, 'wb')\n if os.environ.get('CI') else sys.stdout))\n logging.info('... done')",
"def nodejs(self):\n self.summarize_operation(\"Installing Nodejs\")\n process = Popen(shlex.split(\"curl --silent --location https://deb.nodesource.com/setup_5.x \"), stdout=subprocess.PIPE)\n process_stdout = Popen(shlex.split(\"sudo -E bash -\"), stdin=process.stdout)\n process_stdout.communicate()[0]\n self.install_package(\"nodejs\")\n self.npm_install_globally(\"npm@latest\")",
"def heroku_package_npm():\n if not exists('package.json'):\n local('npm init')\n local('npm install')",
"def npm(cmd):\n status_set(\n 'maintenance',\n 'installing NPM dependencies for {}'.format(node_dist_dir()))\n os.chdir(node_dist_dir())\n if not isinstance(cmd, str):\n status_set('blocked', '{}: should be a string'.format(cmd))\n sys.exit(0)\n cmd = (\"npm {}\".format(cmd))\n sh = shell(cmd)\n if sh.code > 0:\n status_set(\"blocked\", \"NPM error: {}\".format(sh.errors()))\n sys.exit(0)",
"def install_frontend_deps():\n\n with lcd(FRONTENDDIR):\n cmd = '%(npm)s install' % {'npm': get_npm()}\n local(cmd)\n cmd = '%(bower)s install' % {'bower': get_bower()}\n local(cmd)",
"def install_npm_modules():\n # This is a little weird, but we do it this way because if you\n # have package.json, then heroku thinks this might be a node.js\n # app.\n call_command('cp node.json package.json', verbose=True)\n call_command('npm install', verbose=True)\n call_command('rm package.json', verbose=True)",
"def update_npm():\n path = os.path.join(settings.PROJECT_PATH, 'rnacentral', 'portal', 'static')\n with env.cd(path):\n env.run('npm update --loglevel info')",
"def backup_install_repo():\n execute(\"backup_install_repo_node\", env.host_string)",
"def run(cmd, **kwargs):\n # We need the node bin dir to be at the start of the $PATH as some packages\n # will try to run other npm packages directly.\n extra_env = kwargs.setdefault('extra_env', {})\n assert 'PATH' not in extra_env\n extra_env['PATH'] = os.pathsep.join((str(NODE_BIN_DIR), os.getenv('PATH')))\n return libdot.run(\n cmd[1:],\n cmd_prefix=[NODE, NODE_BIN_DIR / cmd[0]],\n log_prefix=[cmd[0]],\n **kwargs)",
"def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))",
"def ensure_node_modules(cwd, logger=None):\n logger = _ensure_logger(logger)\n yarn_proc = ProgressProcess(\n [\"node\", YARN_PATH, \"--immutable\", \"--immutable-cache\"], cwd=cwd, logger=logger\n )\n ret = yarn_proc.wait()\n\n # Update node_modules if needed.\n if ret != 0:\n yarn_proc = ProgressProcess([\"node\", YARN_PATH], cwd=cwd, logger=logger)\n yarn_proc.wait()\n dedupe_yarn(REPO_ROOT, logger)\n\n return ret != 0",
"def _install_npm_command(cmd):\n with settings(warn_only=True):\n version = npm_commands[cmd]['version']\n out = local('npm install {0}@{1}'.format(cmd, version), capture=True)\n if out.return_code != 0:\n print 'Problem installing {0}@{1}'.format(cmd, version)\n exit(1)",
"def update_go_deps(self):\n self.go_version()\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run.with_retry(\n self.m.step,\n 'update go pkgs',\n UPDATE_GO_ATTEMPTS,\n cmd=[self.go_exe, 'get', '-u', '-t', '%s/...' % INFRA_GO_PKG])",
"def _install_phantomjs_ubuntu():\n run = \"\"\"\n apt-get install build-essential chrpath git-core\n apt-get install libssl-dev libfontconfig1-dev\n git clone git://github.com/ariya/phantomjs.git\n cd ./phantomjs\n git checkout 1.8\n ./build.sh\n \"\"\"\n sh(run, cwd=opts.proj.dirs.venv)",
"def deploy_node(app, deltas={}):\n\n virtualenv_path = join(ENV_ROOT, app)\n node_path = join(ENV_ROOT, app, \"node_modules\")\n node_modules_symlink = join(APP_ROOT, app, \"node_modules\")\n npm_prefix = abspath(join(node_path, \"..\"))\n env_file = join(APP_ROOT, app, 'ENV')\n deps = join(APP_ROOT, app, 'package.json')\n\n first_time = False\n if not exists(node_path):\n echo(\"-----> Creating node_modules for '{}'\".format(app), fg='green')\n makedirs(node_path)\n first_time = True\n\n env = {\n 'VIRTUAL_ENV': virtualenv_path,\n 'NODE_PATH': node_path,\n 'NPM_CONFIG_PREFIX': npm_prefix,\n \"PATH\": ':'.join([join(virtualenv_path, \"bin\"), join(node_path, \".bin\"), environ['PATH']])\n }\n if exists(env_file):\n env.update(parse_settings(env_file, env))\n\n # include node binaries on our path\n environ[\"PATH\"] = env[\"PATH\"]\n\n version = env.get(\"NODE_VERSION\")\n node_binary = join(virtualenv_path, \"bin\", \"node\")\n installed = check_output(\"{} -v\".format(node_binary), cwd=join(APP_ROOT, app), env=env, shell=True).decode(\"utf8\").rstrip(\n \"\\n\") if exists(node_binary) else \"\"\n\n if version and check_requirements(['nodeenv']):\n if not installed.endswith(version):\n started = glob(join(UWSGI_ENABLED, '{}*.ini'.format(app)))\n if installed and len(started):\n echo(\"Warning: Can't update node with app running. Stop the app & retry.\", fg='yellow')\n else:\n echo(\"-----> Installing node version '{NODE_VERSION:s}' using nodeenv\".format(**env), fg='green')\n call(\"nodeenv --prebuilt --node={NODE_VERSION:s} --clean-src --force {VIRTUAL_ENV:s}\".format(**env),\n cwd=virtualenv_path, env=env, shell=True)\n else:\n echo(\"-----> Node is installed at {}.\".format(version))\n\n if exists(deps) and check_requirements(['npm']):\n if first_time or getmtime(deps) > getmtime(node_path):\n copyfile(join(APP_ROOT, app, 'package.json'), join(ENV_ROOT, app, 'package.json'))\n if not exists(node_modules_symlink):\n symlink(node_path, node_modules_symlink)\n echo(\"-----> Running npm for '{}'\".format(app), fg='green')\n call('npm install --prefix {} --package-lock=false'.format(npm_prefix), cwd=join(APP_ROOT, app), env=env, shell=True)\n return spawn_app(app, deltas)",
"def install_deps():\n with prefix('source virtualenvwrapper.sh'):\n with prefix('workon %s' % env.PROJECT_VENV):\n if confirm(magenta(\"Is this a production server?\")):\n run('pip install -U -r requirements/prod.txt --use-mirrors')\n else:\n run('pip install -U -r requirements/dev.txt --use-mirrors')",
"def bootstrap(execute=dummy_execute):\n path = node(['-p',\n 'try { require.resolve(\"@prometheusresearch/react-scripts/bin/react-scripts.js\") } catch (e) {\"\"}'],\n quiet=True)\n if not path.strip():\n def bootstrap_yarn():\n url, md5_hash = download.parse_url(YARN_URL)\n yarn_data = download.download(url, md5_hash=md5_hash)\n yarn_path = os.path.join(sys.prefix, 'bin', 'yarn')\n with open(yarn_path, 'w') as f:\n f.write(yarn_data)\n yarn_stat = os.stat(yarn_path)\n os.chmod(yarn_path, yarn_stat.st_mode | stat.S_IEXEC)\n\n def bootstrap_npm():\n npm_path = find_executable('npm', 'npm')\n out, err = exe(npm_path, ['--version'])\n npm_version = out.strip()\n if npm_version[0] not in ('4', '3', '2'):\n npm(['install', '--global', 'npm@2.x.x'])\n npm(['install', '--global', 'npm@' + NPM_VERSION])\n\n def bootstrap_react_scripts():\n deps = [\n '@prometheusresearch/react-scripts@%s' % REACT_SCRIPTS_VERSION,\n 'nan@2.6.2', # this is required for yarn to function propely\n ]\n npm(['install', '--global'] + deps)\n\n execute(bootstrap_yarn, (), 'Installing yarn')\n execute(bootstrap_npm, (), 'Installing npm')\n execute(bootstrap_react_scripts, (), 'Installing react-scripts')",
"def install_with_npm_fast_install(self, directory, silent=False):\n timer = Timer()\n program_name = 'npm-fast-install'\n if not self.context.test('which', 'npm-fast-install'):\n program_name = os.path.join(directory, 'node_modules', '.bin', 'npm-fast-install')\n if not self.context.exists(program_name):\n logger.verbose(\"Installing npm-fast-install locally (because it's not globally installed) ..\")\n self.context.execute('npm', 'install', 'npm-fast-install', directory=directory, silent=silent)\n package_file = os.path.join(directory, 'package.json')\n original_contents = self.context.read_file(package_file)\n metadata = dict(dependencies={}, devDependencies={})\n metadata.update(json.loads(auto_decode(original_contents)))\n need_patch = metadata['devDependencies'] and not self.production\n try:\n # Temporarily change the contents of the package.json file?\n if need_patch:\n logger.debug(\"Temporarily patching %s ..\", package_file)\n patched_data = copy.deepcopy(metadata)\n patched_data['dependencies'].update(patched_data['devDependencies'])\n patched_data.pop('devDependencies')\n self.context.write_file(package_file, json.dumps(patched_data).encode('UTF-8'))\n # Run the npm-fast-install command.\n logger.info(\"Running command: %s\", quote(program_name))\n self.context.execute(program_name, directory=directory, silent=silent)\n finally:\n # Restore the original contents of the package.json file?\n if need_patch:\n logger.debug(\"Restoring original contents of %s ..\", package_file)\n self.context.write_file(package_file, original_contents)\n logger.verbose(\"Took %s to install with npm-fast-install.\", timer)",
"def install_system_packages():\n # first, ensure we have latest versions of everything\n sudo('apt-get update')\n sudo('apt-get -y -q upgrade')\n sudo('apt-get -y -q dist-upgrade')\n # now handle specific system packages we need\n execute(_install_elasticsearch)\n execute(_install_python_deps)\n execute(_install_postgres)\n sudo('apt-get -y -q install nodejs-legacy build-essential nginx npm '\n 'supervisor')\n sudo('npm -g install grunt-cli karma bower phantomjs-prebuilt')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
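The positive document above calls into a project-internal node helper, so its exact API cannot be reproduced here. As a hedged sketch of the pattern the negatives keep repeating — put a vendored node bin directory at the front of $PATH, then drive npm as a subprocess — note that the directory layout and the use of npm ci are assumptions, not taken from the entry:

import os
import subprocess
from pathlib import Path

# Assumed layout: a vendored node lives under <repo>/node_modules/.bin.
NODE_BIN_DIR = Path(__file__).resolve().parent / "node_modules" / ".bin"

def node_and_npm_setup(cwd="."):
    env = os.environ.copy()
    # Prepend our node/npm so they win over any system-wide install.
    env["PATH"] = os.pathsep.join([str(NODE_BIN_DIR), env.get("PATH", "")])
    # npm ci gives reproducible installs when a package-lock.json is present.
    subprocess.run(["npm", "ci"], cwd=cwd, env=env, check=True)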
Load a module from the filesystem. | def load_module(name, path):
loader = importlib.machinery.SourceFileLoader(name, path)
module = types.ModuleType(loader.name)
loader.exec_module(module)
return module | [
"def load_module(name, path):\n\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module",
"def loadModule (\r\n \r\n self,\r\n path = None\r\n ) :\r\n\r\n if not utilities.filePresent( path ) : return None\r\n\r\n directory = utilities.pathDirectory( path )\r\n\r\n package = utilities.pathLastName( directory )\r\n\r\n name = utilities.pathLastNameWithoutExtension( path )\r\n\r\n extension = utilities.pathExtension( path )\r\n\r\n if not extension.startswith( \"py\" ) : return None\r\n\r\n try :\r\n\r\n module = imp.load_source( package + \".\" + name, path )\r\n\r\n except Exception, exception :\r\n\r\n print str( exception )\r\n \r\n return None\r\n\r\n return module",
"async def load_module(self, module: \"Module\"):",
"def LoadModule(filename):\n (name, ext) = os.path.splitext(filename)\n\n fh = open(filename, \"r\")\n try:\n return imp.load_module(name, fh, filename, (ext, \"r\", imp.PY_SOURCE))\n finally:\n fh.close()",
"def _load_module(modulepath):\n\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod",
"def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n raise Exception(\"Couldn't find google drive folder!\")\n\n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n module_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return module_lib",
"def import_module_from_module_path(path):\n return SourceFileLoader('', path).load_module()",
"async def load(self, module):",
"def load_file(self, path, parser = Parser()):\n\t\tpath = os.path.abspath(path) #normalize the path\n\t\ttpl = open(path)\n\t\tmodule = self.load_module(tpl, path, os.path.dirname(path), parser)\n\t\ttpl.close()\n\t\treturn module",
"def load_source(fname, module_name=\"something\"):\n loader = importlib.machinery.SourceFileLoader(module_name, fname)\n mod = types.ModuleType(loader.name)\n loader.exec_module(mod)\n return mod",
"def load_module_from_path(path):\n path = os.path.abspath(path)\n\n # Use the filename (without extension) as the module name\n _, filename = os.path.split(path)\n module_name, _ = os.path.splitext(filename)\n\n spec = importlib.util.spec_from_file_location(module_name, path)\n module = importlib.util.module_from_spec(spec)\n\n # Execute the module\n spec.loader.exec_module(module)\n\n return module",
"def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n logger_lib = None\n print(\"Logger library not found in shared repo.\", flush = True)\n #raise Exception(\"Couldn't find google drive folder!\")\n else: \n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib",
"def load_module(name_or_path):\n if os.path.exists(name_or_path):\n path = name_or_path.rstrip(\"/\")\n modname = os.path.splitext(os.path.basename(path))[0]\n if os.path.isdir(path):\n path = os.path.join(path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(modname, path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n mod = importlib.import_module(name_or_path)\n try:\n path = mod.__path__[0]\n except AttributeError:\n path = mod.__file__\n return mod, path",
"def load_module(name):\n try:\n mod = None\n mod = sys.modules[name]\n except KeyError:\n mod = import_module(name)\n finally:\n if not mod:\n raise ImportError('unable to import module %s' % name)\n return mod",
"def _load_module_from_full_path_string(self, full_path):\n spec = importlib.util.spec_from_file_location(\"module.name\", full_path)\n loaded_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(loaded_module)\n return loaded_module",
"def load_module(module_name):\n try:\n module = resolve_name(module_name)\n except ImportError:\n raise error.NotFound(msg=module_name)\n\n return module",
"def load_mod_from_file(self, fpath):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tfpath = os.path.abspath(fpath)\n\t\tfile_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]\n\t\tif file_ext.lower() != '.py':\n\t\t\treturn\n\t\twith open(fpath) as f:\n\t\t\tcontent = f.read().splitlines()\n\t\tok = False\n\t\tfor line in content:\n\t\t\tif line.strip() == 'from shutit_module import ShutItModule':\n\t\t\t\tok = True\n\t\t\t\tbreak\n\t\tif not ok:\n\t\t\tself.log('Rejected file: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules.\n\t\t# TODO: this is quadratic complexity\n\t\texistingmodules = [\n\t\t\tm for m in self.shutit_modules\n\t\t\tif getattr(m, '__module_file', None) == fpath\n\t\t]\n\t\tif existingmodules:\n\t\t\tself.log('Module already seen: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Looks like it's ok to load this file\n\t\tself.log('Loading source for: ' + fpath,level=logging.DEBUG)\n\n\t\t# Add this directory to the python path iff not already there.\n\t\tdirectory = os.path.dirname(fpath)\n\t\tif directory not in sys.path:\n\t\t\tsys.path.append(os.path.dirname(fpath))\n\t\t# TODO: use bytearray to encode?\n\t\tmod_name = base64.b32encode(fpath.encode()).decode().replace('=', '')\n\t\tpymod = imp.load_source(mod_name, fpath)\n\n\t\t# Got the python module, now time to pull the shutit module(s) out of it.\n\t\ttargets = [\n\t\t\t('module', self.shutit_modules), ('conn_module', self.conn_modules)\n\t\t]\n\t\tself.build['source'] = {}\n\t\tfor attr, target in targets:\n\t\t\tmodulefunc = getattr(pymod, attr, None)\n\t\t\t# Old style or not a shutit module, nothing else to do\n\t\t\tif not callable(modulefunc):\n\t\t\t\treturn\n\t\t\tmodules = modulefunc()\n\t\t\tif not isinstance(modules, list):\n\t\t\t\tmodules = [modules]\n\t\t\tfor module in modules:\n\t\t\t\tsetattr(module, '__module_file', fpath)\n\t\t\t\tShutItModule.register(module.__class__)\n\t\t\t\ttarget.add(module)\n\t\t\t\tself.build['source'][fpath] = open(fpath).read()",
"def load_datamodule(cls, path: Union[str, Path]):\n if isinstance(path, str):\n path = Path(path)\n if not path.exists():\n raise FileNotFoundError(f\"{path} does not exist.\")\n datamodule = joblib.load(path)\n return datamodule",
"def import_from_file(module_name: str, filepath: str):\n return SourceFileLoader(module_name, filepath).load_module()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
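A closely related variant of the positive document above, using importlib.util as several of the negatives do; the optional sys.modules registration is added purely for illustration and is not part of the original entry:

import importlib.util
import sys

def load_module(name, path, register=False):
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    if register:
        # Optional: make the module importable by name from elsewhere.
        sys.modules[name] = module
    spec.loader.exec_module(module)
    return module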
Load & cache the program module. | def _module(self):
if self._module_cache is None:
self._module_cache = load_module(self._name, self._path)
return self._module_cache | [
"def load(self):\n \"\"\"Load a program into memory.\"\"\"\n\n if len(sys.argv) != 2:\n print(\"format: ls8.py [filename]\")\n sys.exit(1)\n\n program = sys.argv[1]\n address = 0\n\n # For now, we've just hardcoded a program:\n\n # program = [\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n\n #open file\n with open(program) as file:\n #read the lines\n for line in file:\n #parse out comments\n line = line.strip().split(\"#\")[0]\n #cast numbers from strings to ints\n val = line.strip()\n #ignore blank lines\n if line == \"\":\n continue\n\n value = int(val, 2)\n self.ram[address] = value\n address +=1",
"def load(self):\n # Error handling\n if len(sys.argv) != 2:\n print(\"usage: ls8.py filename\")\n sys.exit(1)\n\n progname = sys.argv[1]\n address = 0\n\n with open(progname) as f:\n # iterate through each line in the program\n for line in f:\n # remove any comments\n line = line.split(\"#\")[0]\n # remove whitespace\n line = line.strip()\n # skip empty lines\n if line == \"\":\n continue\n\n value = int(line, 2)\n # set the binary instruction to memory\n self.ram[address] = value\n address += 1",
"def _load_program(self, kernel):\n return cl.Program(\n self.context, open('kernels/{0}'.format(kernel)).read()\n ).build()",
"def load(self):\n\n address = 0\n\n if len(sys.argv) < 2:\n print('ERROR - Provide program address to load')\n return\n\n program_filename = sys.argv[1]\n\n program_text = open(program_filename).read()\n program_lines = program_text.split('\\n')\n program = []\n\n for line in program_lines:\n blocks = line.split()\n if len(blocks) > 0:\n if blocks[0] != '#':\n inst = blocks[0]\n program.append(int(inst, 2))\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1",
"def get_program_cache(self):\n return self._program_cache",
"async def load(self, module):",
"def reload_programs(self):\r\n print(\"Reloading programs:\")\r\n for name, program in self._programs.items():\r\n if getattr(program, 'program', None):\r\n print(\" - {}\".format(program.meta.label))\r\n program.program = resources.programs.load(program.meta)",
"def loadProcessFromFile(self):\n preExecScript = self.cmsRunNode.scriptControls[\"PreExe\"]\n preExecScript.append(\"T0.AlcaSkimInjector.RuntimeAlcaSkim\")\n \n cfgBaseName = os.path.basename(self.configFile).replace(\".py\", \"\")\n cfgDirName = os.path.dirname(self.configFile)\n modPath = imp.find_module(cfgBaseName, [cfgDirName])\n\n loader = CMSSWAPILoader(self.cmssw[\"ScramArch\"],\n self.cmssw[\"CMSSWVersion\"],\n self.cmssw[\"CMSPath\"])\n \n try:\n loader.load()\n except Exception, ex:\n logging.error(\"Couldn't load CMSSW libraries: %s\" % ex)\n return None\n \n try:\n modRef = imp.load_module(cfgBaseName, modPath[0],\n modPath[1], modPath[2])\n except Exception, ex:\n logging.error(\"Can't load config: %s\" % ex)\n loader.unload()\n return None\n\n import FWCore.ParameterSet.Config as cms \n cmsCfg = modRef.process\n\n if self.useLazyDownload == True:\n logging.debug(\"Lazy downloads ENABLED.\")\n cmsCfg.AdaptorConfig = cms.Service(\"AdaptorConfig\",\n cacheHint = cms.untracked.string(\"lazy-download\"),\n readHint = cms.untracked.string(\"auto-detect\"))\n else:\n logging.debug(\"Lazy downloads DISABLED.\")\n cmsCfg.AdaptorConfig = cms.Service(\"AdaptorConfig\",\n cacheHint = cms.untracked.string(\"application-only\"),\n readHint = cms.untracked.string(\"direct-unbuffered\")) \n\n for outputModuleName in cmsCfg.outputModules:\n logging.debug(\"ASW: outputModuleName: %s\" % outputModuleName )\n self.outputModuleNames.append( outputModuleName )\n outputModule = getattr(cmsCfg, outputModuleName)\n outputModule.fastCloning = cms.untracked.bool(False)\n \n cfgWrapper = CMSSWConfig()\n cfgWrapper.originalCfg = file(self.configFile).read()\n cfgInt = cfgWrapper.loadConfiguration(cmsCfg)\n cfgInt.validateForProduction()\n self.workflow.payload.cfgInterface = cfgWrapper\n\n loader.unload()\n \n return",
"def load_program(self, program:str):\n\n # tell the driver that the source has changed\n self.devices.mod_asm(True)\n\n # there is a GeckoMotion bug where the program must end with a newline, or the last line of it is not compiled.\n # Interestingly, this affects the GeckoMotion IDE as well.\n if not program.endswith(\"\\n\"):\n program = program + \"\\n\"\n\n self.mocktab.set_text(program)\n\n self.serial_control_lock.acquire()\n\n self.devices.assemble(self.mocktab, self.gm_project_prefs)\n\n self.serial_control_lock.release()",
"def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')",
"def exec_module(self, module):\n pass",
"def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)",
"def load_codemodule(self):\n\n # init\n self.codemodule = None\n self.codemodule_path = ''\n self.packworker = None\n self.readytorun = False\n\n # get path to code module\n (self.codemodule_path, failure) = self.get_pathtocodemodule()\n if (failure == None):\n # ask pack manager to load the import from the path\n (self.codemodule, failure) = self.packmanager.loadimport(self.codemodule_path)\n\n if (failure == None):\n # if the import worked, instantiate the pack object from it\n failure = self.instantiate_packworker()\n\n if (failure == None):\n # success so mark it as ready to run\n self.readytorun = True\n else:\n # failed; add the error message to our eventlist, and continue with this pack marked as not useable? or raise exception right away\n self.appendevent(failure)\n if (True):\n raise ExceptionPlus(failure)\n # return failure\n return failure",
"def run(self, mod, code):\n env = os.environ.copy()\n pythonpath = [os.path.dirname(mod.so_filename)]\n if 'PYTHONPATH' in env:\n pythonpath.append(env['PYTHONPATH'])\n env[\"PYTHONPATH\"] = os.pathsep.join(pythonpath)\n if self.hpy_abi in ['universal', 'debug']:\n # HPy module\n load_module = \"import sys;\" + \\\n \"import hpy.universal;\" + \\\n \"import importlib.util;\" + \\\n \"spec = importlib.util.spec_from_file_location('{name}', '{so_filename}');\" + \\\n \"mod = hpy.universal.load('{name}', '{so_filename}', spec, debug={debug});\"\n escaped_filename = mod.so_filename.replace(\"\\\\\", \"\\\\\\\\\") # Needed for Windows paths\n load_module = load_module.format(name=mod.name, so_filename=escaped_filename,\n debug=self.hpy_abi == 'debug')\n else:\n # CPython module\n assert self.hpy_abi == 'cpython'\n load_module = \"import {} as mod;\".format(mod.name)\n if self.verbose:\n print(\"\\n---\\nExecuting in subprocess: {}\".format(load_module + code))\n result = atomic_run([sys.executable, \"-c\", load_module + code], env=env,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if self.verbose:\n print(\"stdout/stderr:\")\n try:\n out = result.stdout.decode('latin-1')\n err = result.stderr.decode('latin-1')\n print(\"----\\n{out}--\\n{err}-----\".format(out=out, err=err))\n except UnicodeDecodeError:\n print(\"Warning: stdout or stderr could not be decoded with 'latin-1' encoding\")\n return result",
"def load_module():\n proc = subprocess.Popen([\"pactl\", \"load-module\", \"module-suspend-on-idle\"], stderr=subprocess.PIPE)\n stderr = proc.communicate()[1].decode(\"UTF-8\")\n return stderr",
"def load(self, program):\n\n address = 0\n\n # Load in the program instructions\n for instruction in program:\n if instruction:\n instruction = instruction.split()[0] # Remove the comment\n if instruction[0] != '#':\n self.ram[address] = int(instruction, 2)\n address += 1",
"def reload_module_data():\n build_utils.reload_modules()",
"def import_program_as_module(self, program_filepath):\n module_name = os.path.basename(program_filepath)\n\n module = self.make_module_from_file(module_name, program_filepath)\n sys.modules[module_name] = module\n\n return module",
"def _load(self):\n module = importlib.import_module(self.__name__)\n self._parent_module_globals[self._local_name] = module\n self.__dict__.update(module.__dict__)\n return module"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
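The positive document above is the classic hand-rolled lazy cache. A sketch of the same load-once behaviour using functools.cached_property (Python 3.8+); the inline spec-based loading below is an assumption standing in for the project's own load_module helper:

import importlib.util
from functools import cached_property

class Program:
    def __init__(self, name, path):
        self._name = name
        self._path = path

    @cached_property
    def module(self):
        # Runs once on first access; the result is stored on the instance,
        # so later accesses never touch the filesystem again.
        spec = importlib.util.spec_from_file_location(self._name, self._path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod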
Set the packet length. | def _set_packet_len(self, packet_len):
self._packet_len = packet_len | [
"def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1",
"def setlen(self, length):\n self._len = length",
"def set_length(self, length):\n self.length = length",
"def set(self, length):\r\n self.length = length",
"def setLength(self, new_length):\n\n self.length = new_length",
"async def gpt2_set_length(self, ctx, *, arg=None):\n print('Command gpt2_set_length triggered')\n if arg:\n try:\n i = int(arg)\n assert (i > 0) and (i < 1024)\n except ValueError or AssertionError:\n ctx.send(\"ERROR: Argument must be a positive integer number\")\n self.update_config(length=arg)\n else:\n await ctx.send(\"ERROR: Argument required\")",
"def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length",
"def modify_length(self, new_length: float) -> None:\n self.params['l'] = new_length",
"def frame_length(self, frame_length):\n self._frame_length = frame_length",
"def set_length(self, ak_spec: Union[str, BKT], val: float) -> None:\n ...",
"def token_length(self, token_length):\n\n self._token_length = token_length",
"def _set_maskLength(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"maskLength\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"maskLength must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"maskLength\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__maskLength = t\n if hasattr(self, '_set'):\n self._set()",
"def setTweetLength(self):\n self.tweetLength = len(self.tweet[\"text\"])",
"def setLength(self, length):\n self.vector.norm = length",
"def read_packetlen(self):\n packetlen = int(struct.unpack('!I', b\"\".join(self.__input))[0])\n self.__input = []\n self.set_terminator(packetlen)\n self.found_terminator = self.read_milter_data",
"def barcode_data_len(self, barcode_data_len):\n\n self._barcode_data_len = barcode_data_len",
"def setDataSize(self, head,payload,eop):\n self.dataSize = len(head)+len(payload)+len(eop)",
"def set_length(self, ak_spec: Union[str, BKT], val: float) -> None:\n hed_lst, ak_spec2 = self.pick_length(ak_spec)\n if hed_lst is not None and ak_spec2 is not None:\n for hed in hed_lst:\n hed.set_length(ak_spec2, val)",
"def set_length(self, new_length_cm):\n if new_length_cm < self.mat_list[-1][0]:\n raise Exception(\n \"GeneralizedTarget::set_length(): \"\n + \"can not set length below lower boundary of last \"\n + \"material.\"\n )\n self.len_target = new_length_cm\n self.mat_list[-1][1] = new_length_cm\n self._update_variables()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
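The positive document above is a bare setter. For illustration only, the same idea as a validating property; the 16-bit upper bound is an assumption (packet-length fields are commonly uint16) and is not taken from the original entry:

class Header:
    def __init__(self):
        self._packet_len = 0

    @property
    def packet_len(self):
        return self._packet_len

    @packet_len.setter
    def packet_len(self, value):
        # Reject lengths that would not fit the assumed 16-bit field.
        if not 0 <= value <= 0xFFFF:
            raise ValueError("packet length must fit in 16 bits")
        self._packet_len = value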
Creates an XCP Ethernet frame | def create_message(self, packet):
self._header.packet_len = len(bytes(packet))
frame_bytes = super(EthernetTransport, self).create_message(packet)
# Update control counter for next frame
self._header.update_control()
return bytes(frame_bytes) | [
"def _create_frame(self, packets, type):\n fr = bytearray()\n fr += struct.pack('>H', type.value)\n fr += struct.pack('>H', len(packets))\n frameno = self.next_frameno()\n fr += struct.pack('>Q', frameno)\n for pkt in packets:\n fr += struct.pack('>H', len(pkt))\n fr += pkt\n return self.Packet(type, fr, frameno, None)",
"def create_knxipframe(self) -> KNXIPFrame:\n raise NotImplementedError(\"create_knxipframe has to be implemented\")",
"def _makeFrame(buf, _opcode=_CONTROLS.NORMAL):\n bufferLength = len(buf)\n\n if bufferLength > 0xffff:\n length = \"\\x7f%s\" % pack(\">Q\", bufferLength)\n elif bufferLength > 0x7d:\n length = \"\\x7e%s\" % pack(\">H\", bufferLength)\n else:\n length = chr(bufferLength)\n\n # Always make a normal packet.\n header = chr(0x80 | _opcodeForType[_opcode])\n frame = \"%s%s%s\" % (header, length, buf)\n return frame",
"def gen_ieee_packet(self, data):\n\t\tpacket = Dot15d4FCS() / Dot15d4Data() / Raw(load=data)\n\n\t\tpacket.fcf_srcaddrmode = 2\n\t\tpacket.fcf_destaddrmode = 2\n\n\t\tpacket.fcf_panidcompress = True\n\t\tpacket.fcf_ackreq = True\n\t\tpacket.seqnum = self.seqnum\n\n\t\tpacket.dest_panid = self.link_config.dest_panid\n\n\t\tpacket.dest_addr = self.link_config.destination.get_short_address()\n\t\tpacket.src_addr = self.link_config.source.get_short_address()\n\n\t\treturn packet.build()",
"def _create_packet(self, request):\n\n data_len = struct.pack('<Q', len(request))\n packet = b'ZBXD\\x01' + data_len + request\n\n def ord23(x):\n if not isinstance(x, int):\n return ord(x)\n else:\n return x\n\n logger.debug('Packet [str]: %s', packet)\n logger.debug('Packet [hex]: %s', ':'.join(hex(ord23(x))[2:] for x in packet))\n return packet",
"def create_tcp_pkt(smac: bytes, dmac: bytes, sip: bytes, dip: bytes, ip_id: int, sp: int, dp: int,\n flags: int =dpkt.tcp.TH_SYN, payload: bytes = b\"\") -> dpkt.ethernet.Ethernet:\n tcp_pkt = dpkt.tcp.TCP(sport=sp, dport=dp, flags=flags)\n tcp_pkt.data = payload\n ip_pkt = dpkt.ip.IP(id=ip_id, p=6, src=sip, dst=dip)\n ip_pkt.data = tcp_pkt\n ip_pkt.len += len(ip_pkt.data)\n eth_pkt = dpkt.ethernet.Ethernet(src=smac, dst=dmac)\n eth_pkt.data = ip_pkt\n return eth_pkt",
"def build_fake_frame_ies(config: dict) -> Dot11Elt:\n ssid = config.get(\"GENERAL\").get(\"ssid\")\n channel = int(config.get(\"GENERAL\").get(\"channel\"))\n ft_disabled = config.get(\"GENERAL\").get(\"ft_disabled\")\n he_disabled = config.get(\"GENERAL\").get(\"he_disabled\")\n\n ssid = bytes(ssid, \"utf-8\")\n essid = Dot11Elt(ID=\"SSID\", info=ssid)\n\n rates_data = [140, 18, 152, 36, 176, 72, 96, 108]\n rates = Dot11Elt(ID=\"Rates\", info=bytes(rates_data))\n\n channel = bytes([channel])\n dsset = Dot11Elt(ID=\"DSset\", info=channel)\n\n dtim_data = b\"\\x05\\x04\\x00\\x03\\x00\\x00\"\n dtim = Dot11Elt(ID=\"TIM\", info=dtim_data)\n\n ht_cap_data = b\"\\xef\\x19\\x1b\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x20\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n ht_capabilities = Dot11Elt(ID=0x2D, info=ht_cap_data)\n\n if ft_disabled:\n rsn_data = b\"\\x01\\x00\\x00\\x0f\\xac\\x04\\x01\\x00\\x00\\x0f\\xac\\x04\\x01\\x00\\x00\\x0f\\xac\\x02\\x80\\x00\"\n else:\n mobility_domain_data = b\"\\x45\\xc2\\x00\"\n mobility_domain = Dot11Elt(ID=0x36, info=mobility_domain_data)\n rsn_data = b\"\\x01\\x00\\x00\\x0f\\xac\\x04\\x01\\x00\\x00\\x0f\\xac\\x04\\x02\\x00\\x00\\x0f\\xac\\x02\\x00\\x0f\\xac\\x04\\x8c\\x00\"\n\n rsn = Dot11Elt(ID=0x30, info=rsn_data)\n\n ht_info_data = (\n channel\n + b\"\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n )\n ht_information = Dot11Elt(ID=0x3D, info=ht_info_data)\n\n rm_enabled_data = b\"\\x02\\x00\\x00\\x00\\x00\"\n rm_enabled_cap = Dot11Elt(ID=0x46, info=rm_enabled_data)\n\n extended_data = b\"\\x00\\x00\\x08\\x00\\x00\\x00\\x00\\x40\"\n extended = Dot11Elt(ID=0x7F, info=extended_data)\n\n vht_cap_data = b\"\\x32\\x00\\x80\\x03\\xaa\\xff\\x00\\x00\\xaa\\xff\\x00\\x00\"\n vht_capabilities = Dot11Elt(ID=0xBF, info=vht_cap_data)\n\n vht_op_data = b\"\\x00\\x24\\x00\\x00\\x00\"\n vht_operation = Dot11Elt(ID=0xC0, info=vht_op_data)\n\n wmm_data = b\"\\x00\\x50\\xf2\\x02\\x01\\x01\\x8a\\x00\\x03\\xa4\\x00\\x00\\x27\\xa4\\x00\\x00\\x42\\x43\\x5e\\x00\\x62\\x32\\x2f\\x00\"\n wmm = Dot11Elt(ID=0xDD, info=wmm_data)\n\n he_cap_data = b\"\\x23\\x0d\\x01\\x00\\x02\\x40\\x00\\x04\\x70\\x0c\\x89\\x7f\\x03\\x80\\x04\\x00\\x00\\x00\\xaa\\xaa\\xaa\\xaa\\x7b\\x1c\\xc7\\x71\\x1c\\xc7\\x71\\x1c\\xc7\\x71\\x1c\\xc7\\x71\"\n he_capabilities = Dot11Elt(ID=0xFF, info=he_cap_data)\n\n he_op_data = b\"\\x24\\xf4\\x3f\\x00\\x19\\xfc\\xff\"\n he_operation = Dot11Elt(ID=0xFF, info=he_op_data)\n\n spatial_reuse_data = b\"\\x27\\x05\\x00\"\n spatial_reuse = Dot11Elt(ID=0xFF, info=spatial_reuse_data)\n\n mu_edca_data = b\"\\x26\\x09\\x03\\xa4\\x28\\x27\\xa4\\x28\\x42\\x73\\x28\\x62\\x72\\x28\"\n mu_edca = Dot11Elt(ID=0xFF, info=mu_edca_data)\n\n six_ghz_cap_data = b\"\\x3b\\x00\\x00\"\n six_ghz_cap = Dot11Elt(ID=0xFF, info=six_ghz_cap_data)\n\n # reduced_neighbor_report_data = b\"\\x02\"\n # reduced_neighbor_report = Dot11Elt(ID=0xFF, info=reduced_neighbor_report_data)\n\n # custom_hash = {\"pver\": f\"{__version__}\", \"sver\": get_wlanpi_version()}\n # custom_data = bytes(f\"{custom_hash}\", \"utf-8\")\n # custom = Dot11Elt(ID=0xDE, info=custom_data)\n\n if ft_disabled:\n frame = (\n essid\n / rates\n / dsset\n / dtim\n / ht_capabilities\n / rsn\n / ht_information\n / rm_enabled_cap\n / extended\n / vht_capabilities\n / vht_operation\n )\n else:\n frame = (\n essid\n / rates\n / dsset\n / dtim\n / ht_capabilities\n / rsn\n / ht_information\n / mobility_domain\n / rm_enabled_cap\n / extended\n / 
vht_capabilities\n / vht_operation\n )\n if he_disabled:\n frame = frame / wmm\n else:\n frame = (\n frame\n # / reduced_neighbor_report\n / he_capabilities\n / he_operation\n / spatial_reuse\n / mu_edca\n / six_ghz_cap\n / wmm\n # / custom\n )\n\n # for gathering data to validate tests:\n #\n # frame_bytes = bytes(frame)\n # print(frame_bytes)\n return frame",
"def define_ethernet_header(self, src=None, dst=None, typeeth=None, tag=None):\n ether_header = Ether()\n if (dst == None):\n ether_header.dst = BCAST_MAC\n else:\n ether_header.dst = dst\n ether_header.src = src\n return ether_header",
"def ethernet_frame(packet):\n dest_mac, src_mac, proto = struct.unpack('! 6s 6s H', packet[:14])\n return get_mac_addr(dest_mac), get_mac_addr(src_mac), socket.htons(proto), packet[14:]",
"def create_packet(id):\n\t# Builds Dummy Header\n\t# Header is type (8), code (8), checksum (16), id (16), sequence (16)\n\theader = pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, 0, id, 1)\n\tdata = 192 * \"Q\"\n\n\t# Builds Real Header\n\theader = pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, htons(checksum(header + data)), id, 1)\n\treturn header + data",
"def make_packet(message, host):\n\tRESOURCE = \"/\"\t\t\t\t# dummy resource\n\t\n\t# First line is the request\n\trequest = HTTPConstants.GET_REQUEST + \" \" + RESOURCE + \" \" + HTTPConstants.VERSION + HTTPConstants.CRLF\n\t\n\t# Next are the headers\n\theaders = \"Host: {0}\".format(host) + HTTPConstants.CRLF\n\t\n\t# Construct the head\n\thead = request + headers\n\t\n\t# Construct the body\n\tbody = message + HTTPConstants.CRLF\n\t\n\t# Assembly into a packet, where the head and body (message) are separated by a blank line (CRLF), and the EOM is\n\t# denoted by a blank line\n\treturn head + HTTPConstants.CRLF + body + HTTPConstants.CRLF",
"def generate_packet(src_ip, dst_ip, dst_mac):\n if ipaddress.ip_network(src_ip.encode().decode(), False).version == 4:\n pkt = testutils.simple_ip_packet(eth_dst=dst_mac, ip_src=src_ip, ip_dst=dst_ip)\n exp_pkt = Mask(pkt)\n exp_pkt.set_do_not_care_scapy(scapy.IP, \"ttl\")\n exp_pkt.set_do_not_care_scapy(scapy.IP, \"chksum\")\n else:\n pkt = testutils.simple_tcpv6_packet(eth_dst=dst_mac, ipv6_src=src_ip, ipv6_dst=dst_ip)\n exp_pkt = Mask(pkt)\n exp_pkt.set_do_not_care_scapy(scapy.IPv6, \"hlim\")\n\n exp_pkt.set_do_not_care_scapy(scapy.Ether, \"dst\")\n exp_pkt.set_do_not_care_scapy(scapy.Ether, \"src\")\n\n return pkt, exp_pkt",
"def create_frame(data, opcode):\r\n if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):\r\n data = data.encode(\"utf-8\")\r\n # mask must be set if send data from client\r\n return ABNF(1, 0, 0, 0, opcode, 1, data)",
"def init_from_body(knxip_body: KNXIPBody):\n knxipframe = KNXIPFrame(knxip_body.xknx)\n knxipframe.header.service_type_ident = knxip_body.__class__.service_type\n knxipframe.body = knxip_body\n knxipframe.normalize()\n return knxipframe",
"def fusion_api_create_ethernet_network(self, body, api=None, headers=None):\n return self.ethernet_network.create(body, api, headers)",
"def createPacket(packetSize):\n data = ''\n for i in range(packetSize - 1):\n data = data + 'A'\n return bytearray(data + 'B', 'utf-8')",
"def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, \"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, 
\"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition",
"def testFramepack2(self):\n # Check bad frame generation:\n frame = stomper.Frame()\n frame.cmd = 'DISCONNECT'\n result = frame.pack()\n correct = 'DISCONNECT\\n\\n\\x00\\n'\n self.assertEqual(result, correct)",
"def _send_knxipframe(self, knxipframe: KNXIPFrame) -> None:\n self.transport.send(knxipframe)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
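The positive document above delegates the actual header layout to its base class, so only the length bookkeeping and counter update are visible. A hedged sketch of the XCP-on-Ethernet framing it implies — a 16-bit LEN followed by a 16-bit CTR prepended to the packet; the little-endian byte order here is an assumption, since the real byte order is negotiated with the slave:

import struct

def build_xcp_eth_frame(packet: bytes, ctr: int) -> bytes:
    # LEN = number of packet bytes, CTR = rolling frame counter.
    header = struct.pack("<HH", len(packet), ctr & 0xFFFF)
    return header + packet

# e.g. wrapping a CONNECT command (0xFF 0x00) as frame number 0:
frame = build_xcp_eth_frame(b"\xff\x00", 0)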
Computes the pickup_features feature group. To restrict features to a time range, pass in ts_column, start_date, and/or end_date as kwargs. | def pickup_features_fn(df, ts_column, start_date, end_date):
df = filter_df_by_ts(
df, ts_column, start_date, end_date
)
pickupzip_features = (
df.groupBy(
"pickup_zip", window("tpep_pickup_datetime", "1 hour", "15 minutes")
) # 1 hour window, sliding every 15 minutes
.agg(
mean("fare_amount").alias("mean_fare_window_1h_pickup_zip"),
count("*").alias("count_trips_window_1h_pickup_zip"),
)
.select(
col("pickup_zip").alias("zip"),
unix_timestamp(col("window.end")).alias("ts").cast(IntegerType()),
partition_id(to_timestamp(col("window.end"))).alias("yyyy_mm"),
col("mean_fare_window_1h_pickup_zip").cast(FloatType()),
col("count_trips_window_1h_pickup_zip").cast(IntegerType()),
)
)
return pickupzip_features | [
"def dropoff_features_fn(df, ts_column, start_date, end_date):\n df = filter_df_by_ts(\n df, ts_column, start_date, end_date\n )\n dropoffzip_features = (\n df.groupBy(\"dropoff_zip\", window(\"tpep_dropoff_datetime\", \"30 minute\"))\n .agg(count(\"*\").alias(\"count_trips_window_30m_dropoff_zip\"))\n .select(\n col(\"dropoff_zip\").alias(\"zip\"),\n unix_timestamp(col(\"window.end\")).alias(\"ts\").cast(IntegerType()),\n partition_id(to_timestamp(col(\"window.end\"))).alias(\"yyyy_mm\"),\n col(\"count_trips_window_30m_dropoff_zip\").cast(IntegerType()),\n is_weekend(col(\"window.end\")).alias(\"dropoff_is_weekend\"),\n )\n )\n return dropoffzip_features",
"def get_date_features(gt_ids=[], gt_masks=None, gt_shifts=None, first_year=None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # If lat, lon columns exist, pivot to wide format\n if 'lat' in gt.columns and 'lon' in gt.columns:\n if gt_shift == None:\n measurement_variable = get_measurement_variable(gt_id)\n else:\n measurement_variable = get_measurement_variable(gt_id)+'_shift'+str(gt_shift)\n gt = pd.pivot_table(gt, values=measurement_variable, index='start_date',\n columns=['lat', 'lon']).reset_index()\n gt = pd.DataFrame(gt.to_records())\n gt.drop(\"index\", axis=1, inplace=True)\n # Rename columns to start_date and precip_(27.0,261.0), etc.\n gt.rename(columns={gt.columns[0]: 'start_date'}, inplace=True)\n gt.rename(columns=lambda x: x.replace('(',\n measurement_variable +\n '_('), inplace=True)\n # Use outer merge to include union of start_date values across all features\n # combinations across all features\n df = df_merge(df, gt, on=\"start_date\")\n print \"Elapsed: {}s\".format(time.time() - t)\n\n return df",
"def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) & (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? 
{df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end",
"def create_date_features(df = None, date = None):\n #TODO",
"def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n \"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : \"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : 
\"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs",
"def engineer_features(df, df_full):\n # length of trip\n df['trip_length'] = (df['srch_co'] - df['srch_ci'])\\\n .astype('timedelta64[D]')\n df['trip_length'].fillna((df['trip_length'].median()), inplace=True)\n\n # is the trip in the same country\n df['domestic'] = np.where((df['user_location_country']\n .equals(df['hotel_country'])), 1, 0)\n\n # is it trip length smaller than 3 days\n df['short_trip'] = np.where((df['trip_length'] <= 3), 1, 0)\n\n # is it a weekend trip\n df['srch_ci_d'] = df['srch_ci'].dt.day_name()\n df['srch_co_d'] = df['srch_co'].dt.day_name()\n df['weekend_trip'] = np.where((((df['srch_ci_d'] == 'Friday') &\n (df['trip_length'] <= 3)) |\n ((df['srch_ci_d'] == 'Saturday') &\n (df['trip_length'] <= 2))), 1, 0)\n\n # is it a business trip\n df['business_trip'] = np.where(((df['srch_ci_d'] != 'Friday') &\n (df['srch_ci_d'] != 'Saturday') &\n (df['srch_ci_d'] != 'Sunday') &\n (df['srch_co_d'] != 'Saturday') &\n (df['srch_co_d'] != 'Sunday') &\n (df['trip_length'] <= 4)), 1, 0)\n df.drop(columns=['srch_ci_d', 'srch_co_d'], inplace=True)\n\n # plan time - how far ahead do we plan the trip\n df['plan_time'] = (df['srch_ci'] - df['date_time'])\\\n .astype('timedelta64[D]')\n\n # is it a solo trip / family trip\n df['solo_trip'] = np.where(((df['srch_adults_cnt'] == 1) &\n (df['srch_children_cnt'] == 0)), 1, 0)\n\n # aggregate a mean booking rate\n def aggregated_booking_rate(instance):\n if instance['is_booking'] == 0:\n return np.nan\n instance_date = instance['date_time']\n instance_id = instance['user_id']\n mean_booking_rate = df_full[(df_full['date_time'] <= instance_date)\n & (df_full['user_id'] == instance_id)\n ]['is_booking'].mean()\n return mean_booking_rate\n\n df['booking_rate'] = df.apply(aggregated_booking_rate, axis=1)\n\n # aggregate previous bookings & clicks by hotel cluster\n def aggregated_previous_cluster(instance, hotel_cluster):\n if instance['is_booking'] == 0:\n return np.nan, np.nan\n instance_date = instance['date_time']\n instance_id = instance['user_id']\n cnt_b = len(df_full[(df_full['date_time'] <= instance_date) &\n (df_full['user_id'] == instance_id) &\n (df_full['hotel_cluster'] == hotel_cluster) &\n (df_full['is_booking'] == 1)])\n cnt_nob = len(df_full[(df_full['date_time'] <= instance_date) &\n (df_full['user_id'] == instance_id) &\n (df_full['hotel_cluster'] == hotel_cluster) &\n (df_full['is_booking'] == 0)])\n return cnt_b, cnt_nob\n\n for hotel_cluster in df_full['hotel_cluster'].unique():\n if np.isnan(hotel_cluster): # test set does not have cluster given\n continue\n df['booked_cluster' + str(int(hotel_cluster))], \\\n df['not_booked_cluster' + str(int(hotel_cluster))] = zip(\n *df.apply(lambda instance:\n aggregated_previous_cluster(instance, hotel_cluster),\n axis=1))\n\n return df",
"def map_fires_to_foehn(df_fires, df_foehn):\n\n # Loop over all fires\n rows_list = []\n for index, fire in df_fires.iterrows():\n new_features_dict = {}\n\n # Get start and end index in foehn dataframe for given forest fire\n n_start = (df_foehn[\"date\"] <= fire[\"start_date_min\"]).idxmin()\n # n_end = (df_foehn[\"date\"] < fire[\"end_date_max\"]).idxmin()\n\n # Foehn minutes before forest fire\n new_features_dict.update(sum_foehn_minutes_before_fire(fire, df_foehn, n_start, hours_before_start=24))\n new_features_dict.update(sum_foehn_minutes_before_fire(fire, df_foehn, n_start, hours_before_start=48))\n\n # Foehn minutes during starting hours of the fire\n new_features_dict.update(sum_foehn_minutes_during_start_period_of_fire(fire, df_foehn, n_start, hours_after_start=2))\n new_features_dict.update(sum_foehn_minutes_during_start_period_of_fire(fire, df_foehn, n_start, hours_after_start=6))\n new_features_dict.update(sum_foehn_minutes_during_start_period_of_fire(fire, df_foehn, n_start, hours_after_start=12))\n\n rows_list.append(new_features_dict)\n\n logging.info(f\"{len(df_fires.index)} fires in dataset after adding foehn features\")\n return pd.concat([df_fires, pd.DataFrame(rows_list)], axis=1)",
"def create_time_req_features(df_casted):\n time_df = (df_casted\n .withColumn('prev_timestamp', lag(df_casted['timestamp']).over(Window.partitionBy(\"ip\").orderBy(\"timestamp\")))\n .withColumn('time_diff', analysis.get_time_delta(col(\"prev_timestamp\"), col(\"timestamp\")))\n .withColumn('new_session', when((col(\"time_diff\") > 900), 1).otherwise(0))\n .withColumn('count_session', sf.sum(col('new_session')).over(Window.partitionBy(\"ip\").orderBy(\"timestamp\")))\n .withColumn('day', sf.dayofyear(col('timestamp')))\n .withColumn('hour', sf.hour(col('timestamp')))\n .withColumn('minute', sf.minute(col('timestamp')))\n .withColumn('status_type', when((col(\"elb_status_code\") == 200), 1).otherwise(0)))\n\n return time_df",
"def featuresFn(self):\n d = {'endGroupSize': sum(self.endSizes),\n 'maxEndSize': max(self.endSizes),\n 'numEnds': len(self.endSizes)}\n d.update(self.flowerFeatures())\n return d",
"def _generate_features(data, features, home_loc):\n feature_func = {'gyration_radius': gyration_radius,\n 'num_trips': num_trips,\n 'num_clusters': num_clusters,\n 'max_dist_between_clusters': max_dist_between_clusters,\n 'num_clusters': num_clusters,\n 'displacements': displacements,\n 'wait_time': wait_time,\n 'entropy': entropy,\n 'loc_var': loc_var,\n 'home_stay': home_stay,\n # 'trans_time': trans_time,\n 'total_dist': total_dist}\n\n # a dictionary to store features\n D = {}\n\n # if this a list of dataframe,\n # that is, if this is daily/weekly data\n if type(data) is list:\n # generate features for each day or week\n for date, curr_data in data:\n row = {}\n if len(curr_data) == 0:\n for f in features:\n row[f] = np.nan\n else:\n for f in features:\n args = features[f]\n\n if f == 'home_stay':\n args['home_loc'] = home_loc\n\n if args is not None:\n f_value = feature_func[f](curr_data, **args)\n else:\n f_value = feature_func[f](curr_data)\n row[f] = f_value\n\n D[date] = row\n # if data is a dataframe\n else:\n for f in features:\n args = features[f]\n\n if f == 'home_stay':\n args['home_loc'] = home_loc\n\n if args is not None:\n f_value = feature_func[f](data, **args)\n else:\n f_value = feature_func[f](data)\n\n D[f] = f_value\n\n return D",
"def generate_features(df):\n return np.array([np.array(xi) for xi in pd.to_datetime(df).apply(lambda x: [x.year, x.month, x.day, x.hour, x.minute, x.second, x.weekday()])])",
"def featuretest(self, args):\n db_engine = create_engine(self.root.db_url)\n feature_config = yaml.load(args.feature_config_file)\n\n FeatureGenerator(db_engine, 'features_test').create_features_before_imputation(\n feature_aggregation_config=feature_config,\n feature_dates=[args.as_of_date]\n )\n logging.info('Features created for feature_config %s and date %s', feature_config, args.as_of_date)",
"def minimum_component(self , mask_period=[], p=1, fcut=None, Q=None , in_place=False , verbose=True ):\n###############################################################################\n \n # import\n import numpy as np\n \n # handle masked periods\n\n ###############################################################################\n def __ensure_list_of_list(ll):\n ###############################################################################\n \"\"\"\n Ensures ll is a list of lists\n \n [a,b] returns [[a,b]], and [[a,b]] returns [[a,b]]\n \"\"\"\n \n # check this is a list\n \n if not isinstance(ll,list):\n raise TypeError('!!! __ensure_list_of_list requires a list or a list of list as argument: ',ll)\n \n if ll == []:\n return [[]]\n \n # case simple list\n if not isinstance(ll[0],list):\n return([ll])\n # case list of list\n else:\n return(ll)\n ###############################################################################\n \n # ensure lperiod is a list of lists\n lperiod = __ensure_list_of_list( mask_period )\n \n # ma = feature mask \n feature_mask = np.copy( self.data[:,0] )\n feature_mask[:] = False\n\n np_index=np.array([],dtype=int)\n \n # check lperiod is not [[]]\n if lperiod != [[]]:\n # masked periods\n for period in lperiod:\n \n # make actual extraction - case data_xyz now handled\n start_date_period= period[0]\n end_date_period = period[1]\n \n np_idx = np.where((self.data[:,0]>=start_date_period) & (self.data[:,0] <=end_date_period ))\n \n np_index = np.append( np_index , np_idx )\n\n feature_mask[ np_index ] = True\n\n ### copy\n new_gts=self.copy( data_xyz=None )\n\n\n ### run filter \n new_gts.data[:,1] = __min_component_filter(self.data[:,0],self.data[:,1], feature_mask , \\\n p=p, \\\n fcut=fcut , \\\n Q=Q)\n \n \n new_gts.data[:,2] = __min_component_filter(self.data[:,0],self.data[:,2], feature_mask , \\\n p=p, \\\n fcut=fcut , \\\n Q=Q)\n\n new_gts.data[:,3] = __min_component_filter(self.data[:,0],self.data[:,3], feature_mask , \\\n p=p, \\\n fcut=fcut , \\\n Q=Q)\n\n\n # restore original values for masked periods\n\n new_gts.data[np_index,1] = np.copy( self.data[np_index,1] )\n new_gts.data[np_index,2] = np.copy( self.data[np_index,2] )\n new_gts.data[np_index,3] = np.copy( self.data[np_index,3] )\n \n\n ### return\n if in_place:\n self = new_gts\n return self\n else:\n return new_gts",
"def from_features(cls, features, crs=..., columns=...): # -> GeoDataFrame:\n ...",
"def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)",
"def extract_features(self):\n if not os.path.isfile(self.feature_filename):\n self.limit_order_df = pd.read_excel(self.limit_order_filename)\n # index starting from the valid level\n self.delimiter_indices = self.get_delimiter_indices()\n print(\"len delimiter_indices\", len(self.delimiter_indices))\n # index at the end of every interval\n self.time_interval_indices = (np.array(self.get_time_interval_indices()) - 1).tolist() \n basic_set, timestamps, mid_prices = self.extract_basic_set()\n time_insensitive_set = self.extract_time_insensitive_set(basic_set)\n labels = self.get_mid_price_labels(mid_prices)\n self.save_feature_json(self.feature_filename, timestamps, basic_set,\n time_insensitive_set, labels, mid_prices)\n df = pd.read_json(self.feature_filename, orient=\"records\", lines=\"True\")\n timestamps = df[\"timestamps\"].tolist()\n basic_set = df[\"basic_set\"].tolist()\n time_insensitive_set = df[\"time_insensitive_set\"].tolist()\n labels = df[\"labels\"].tolist()\n return np.array(timestamps), np.array(basic_set), \\\n np.array(time_insensitive_set), np.array(labels)",
"def features_from_points(self):\n\n filtered_data = self.load_filtered_data()\n logger.info(f'extracting features from {len(filtered_data)} animals')\n \n pbar = tqdm(total=len(filtered_data))\n feats = {}\n for strain, fdata in filtered_data.items():\n feats[strain] = Parallel(n_jobs=psutil.cpu_count(logical=False))(delayed(extract_comb_feats)(data, self.fps) for data in fdata)\n feats[strain] = [aggregate_features(f, self.stride_window) for f in feats[strain]]\n pbar.update(1)\n\n logger.info(f'extracted {len(feats)} datasets of {feats[list(feats.keys())[0]][0].shape[1]}D features')\n logger.info(f'collected features into bins of {1000 * self.stride_window // self.fps} ms')\n\n with open(self.output_dir + '/' + self.run_id + '_features.sav', 'wb') as f:\n joblib.dump(feats, f)",
"def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset",
"def add_datepart(\n cls,\n df: pd.DataFrame,\n field_name: str,\n frequency: str,\n prefix: str = None,\n drop: bool = True,\n ) -> Tuple[pd.DataFrame, List[str]]:\n field = df[field_name]\n prefix = (re.sub(\"[Dd]ate$\", \"\", field_name) if prefix is None else prefix) + \"_\"\n attr = cls.time_features_from_frequency_str(frequency)\n added_features = []\n for n in attr:\n if n == \"Week\":\n continue\n df[prefix + n] = getattr(field.dt, n.lower())\n added_features.append(prefix + n)\n # Pandas removed `dt.week` in v1.1.10\n if \"Week\" in attr:\n week = field.dt.isocalendar().week if hasattr(field.dt, \"isocalendar\") else field.dt.week\n df.insert(3, prefix + \"Week\", week)\n added_features.append(prefix + \"Week\")\n # TODO Not adding Elapsed by default. Need to route it through config\n # mask = ~field.isna()\n # df[prefix + \"Elapsed\"] = np.where(\n # mask, field.values.astype(np.int64) // 10 ** 9, None\n # )\n # added_features.append(prefix + \"Elapsed\")\n if drop:\n df.drop(field_name, axis=1, inplace=True)\n\n # Removing features woth zero variations\n # for col in added_features:\n # if len(df[col].unique()) == 1:\n # df.drop(columns=col, inplace=True)\n # added_features.remove(col)\n return df, added_features"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the dropoff_features feature group. To restrict features to a time range, pass in ts_column, start_date, and/or end_date as kwargs. | def dropoff_features_fn(df, ts_column, start_date, end_date):
df = filter_df_by_ts(
df, ts_column, start_date, end_date
)
dropoffzip_features = (
df.groupBy("dropoff_zip", window("tpep_dropoff_datetime", "30 minute"))
.agg(count("*").alias("count_trips_window_30m_dropoff_zip"))
.select(
col("dropoff_zip").alias("zip"),
unix_timestamp(col("window.end")).alias("ts").cast(IntegerType()),
partition_id(to_timestamp(col("window.end"))).alias("yyyy_mm"),
col("count_trips_window_30m_dropoff_zip").cast(IntegerType()),
is_weekend(col("window.end")).alias("dropoff_is_weekend"),
)
)
return dropoffzip_features | [
"def pickup_features_fn(df, ts_column, start_date, end_date):\n df = filter_df_by_ts(\n df, ts_column, start_date, end_date\n )\n pickupzip_features = (\n df.groupBy(\n \"pickup_zip\", window(\"tpep_pickup_datetime\", \"1 hour\", \"15 minutes\")\n ) # 1 hour window, sliding every 15 minutes\n .agg(\n mean(\"fare_amount\").alias(\"mean_fare_window_1h_pickup_zip\"),\n count(\"*\").alias(\"count_trips_window_1h_pickup_zip\"),\n )\n .select(\n col(\"pickup_zip\").alias(\"zip\"),\n unix_timestamp(col(\"window.end\")).alias(\"ts\").cast(IntegerType()),\n partition_id(to_timestamp(col(\"window.end\"))).alias(\"yyyy_mm\"),\n col(\"mean_fare_window_1h_pickup_zip\").cast(FloatType()),\n col(\"count_trips_window_1h_pickup_zip\").cast(IntegerType()),\n )\n )\n return pickupzip_features",
"def get_date_features(gt_ids=[], gt_masks=None, gt_shifts=None, first_year=None):\n # If particular arguments aren't lists, replace with repeating iterators\n if not isinstance(gt_masks, list):\n gt_masks = itertools.repeat(gt_masks)\n if not isinstance(gt_shifts, list):\n gt_shifts = itertools.repeat(gt_shifts)\n\n # Add each ground truth feature to dataframe\n df = None\n for gt_id, gt_mask, gt_shift in zip(gt_ids, gt_masks, gt_shifts):\n print \"Getting {}_shift{}\".format(gt_id, gt_shift)\n t = time.time()\n # Load ground truth data\n gt = get_ground_truth(gt_id, gt_mask, gt_shift)\n # Discard years prior to first_year\n gt = year_slice(gt, first_year = first_year)\n # If lat, lon columns exist, pivot to wide format\n if 'lat' in gt.columns and 'lon' in gt.columns:\n if gt_shift == None:\n measurement_variable = get_measurement_variable(gt_id)\n else:\n measurement_variable = get_measurement_variable(gt_id)+'_shift'+str(gt_shift)\n gt = pd.pivot_table(gt, values=measurement_variable, index='start_date',\n columns=['lat', 'lon']).reset_index()\n gt = pd.DataFrame(gt.to_records())\n gt.drop(\"index\", axis=1, inplace=True)\n # Rename columns to start_date and precip_(27.0,261.0), etc.\n gt.rename(columns={gt.columns[0]: 'start_date'}, inplace=True)\n gt.rename(columns=lambda x: x.replace('(',\n measurement_variable +\n '_('), inplace=True)\n # Use outer merge to include union of start_date values across all features\n # combinations across all features\n df = df_merge(df, gt, on=\"start_date\")\n print \"Elapsed: {}s\".format(time.time() - t)\n\n return df",
"def create_lag_features(df, window):\n\n feature_cols = [\"air_temperature\", \"cloud_coverage\", \"dew_temperature\", \"precip_depth_1_hr\"]\n df_site = df.groupby(\"site_id\")\n\n df_rolled = df_site[feature_cols].rolling(window=window, min_periods=0)\n\n df_mean = df_rolled.mean().reset_index().astype(np.float16)\n df_median = df_rolled.median().reset_index().astype(np.float16)\n df_min = df_rolled.min().reset_index().astype(np.float16)\n df_max = df_rolled.max().reset_index().astype(np.float16)\n df_std = df_rolled.std().reset_index().astype(np.float16)\n df_skew = df_rolled.skew().reset_index().astype(np.float16)\n\n for feature in feature_cols:\n df[f\"{feature}_mean_lag{window}\"] = df_mean[feature]\n df[f\"{feature}_median_lag{window}\"] = df_median[feature]\n df[f\"{feature}_min_lag{window}\"] = df_min[feature]\n df[f\"{feature}_max_lag{window}\"] = df_max[feature]\n df[f\"{feature}_std_lag{window}\"] = df_std[feature]\n df[f\"{feature}_skew_lag{window}\"] = df_std[feature]\n\n return df",
"def feature_split(df, y_column, feature, feature_threshold_min, feature_threshold_max=None, keep_columns=None):\n if keep_columns is None:\n keep_columns = set(df.columns).difference([y_column])\n\n below_min = df[feature] <= feature_threshold_min\n above_min = df[feature] > feature_threshold_min\n below_max = df[feature] <= feature_threshold_max\n\n if feature_threshold_max is not None: \n above_min = above_min & below_max\n\n X_train = df[below_min][keep_columns]\n X_test = df[above_min][keep_columns]\n y_train = df[below_min][y_column]\n y_test = df[above_min][y_column]\n\n return X_train, X_test, y_train, y_test",
"def create_date_features(df = None, date = None):\n #TODO",
"def build_some_features(df_, num_periods_lagged=1, num_periods_diffed=0, weekday=False, month=False, rolling=[], holidays=False):\n # make a copy \n df_ = df_.copy()\n \n # for a few values, get the lags \n for i in range(1, num_periods_lagged+1):\n # make a new feature, with the lags in the observed values column\n df_['lagged_%s' % str(i)] = df_['customers'].shift(i)\n \n # for a few values, get the diffs \n for i in range(1, num_periods_diffed+1):\n # make a new feature, with the lags in the observed values column\n df_['diff_%s' % str(i)] = df_['customers'].diff(i)\n \n for stat in rolling:\n df_['rolling_%s'%str(stat)] = df_['customers'].rolling('7D').aggregate(stat)\n \n if weekday == True:\n df_['sin_weekday'] = np.sin(2*np.pi*df_.index.weekday/7)\n df_['cos_weekday'] = np.sin(2*np.pi*df_.index.weekday/7)\n \n if month == True:\n df_['sin_month'] = np.sin(2*np.pi*df_.index.month/12)\n df_['cos_month'] = np.sin(2*np.pi*df_.index.month/12)\n \n if holidays == True:\n holidays = df_[((df_.index.month==12) & (df_.index.day==25))\n |((df_.index.month==1) & (df_.index.day==1))].customers\n df_['holidays'] = holidays + 1\n df_['holidays'] = df_['holidays'].fillna(0)\n \n return df_",
"def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X",
"def create_time_req_features(df_casted):\n time_df = (df_casted\n .withColumn('prev_timestamp', lag(df_casted['timestamp']).over(Window.partitionBy(\"ip\").orderBy(\"timestamp\")))\n .withColumn('time_diff', analysis.get_time_delta(col(\"prev_timestamp\"), col(\"timestamp\")))\n .withColumn('new_session', when((col(\"time_diff\") > 900), 1).otherwise(0))\n .withColumn('count_session', sf.sum(col('new_session')).over(Window.partitionBy(\"ip\").orderBy(\"timestamp\")))\n .withColumn('day', sf.dayofyear(col('timestamp')))\n .withColumn('hour', sf.hour(col('timestamp')))\n .withColumn('minute', sf.minute(col('timestamp')))\n .withColumn('status_type', when((col(\"elb_status_code\") == 200), 1).otherwise(0)))\n\n return time_df",
"def clear_periods_intersections(source: DataFrame, columns_subset: List[str]) -> DataFrame:\n\n custom_window = Window.partitionBy(*columns_subset).orderBy('start_dt')\n\n result = source \\\n .withColumn('lag_end', F.lag('end_date').over(custom_window)) \\\n .withColumn('flg',\n F.when(F.col('lag_end') < F.date_sub(F.col('start_date'), 1), 1)\n .otherwise(0)) \\\n .withColumn('grp', F.sum('flg').over(custom_window)) \\\n .groupBy(*columns_subset, 'grp') \\\n .agg(F.min('start_date').alias('start_date'),\n F.max('end_date').alias('end_date')) \\\n .drop('grp')\n\n return result",
"def add_datepart(\n cls,\n df: pd.DataFrame,\n field_name: str,\n frequency: str,\n prefix: str = None,\n drop: bool = True,\n ) -> Tuple[pd.DataFrame, List[str]]:\n field = df[field_name]\n prefix = (re.sub(\"[Dd]ate$\", \"\", field_name) if prefix is None else prefix) + \"_\"\n attr = cls.time_features_from_frequency_str(frequency)\n added_features = []\n for n in attr:\n if n == \"Week\":\n continue\n df[prefix + n] = getattr(field.dt, n.lower())\n added_features.append(prefix + n)\n # Pandas removed `dt.week` in v1.1.10\n if \"Week\" in attr:\n week = field.dt.isocalendar().week if hasattr(field.dt, \"isocalendar\") else field.dt.week\n df.insert(3, prefix + \"Week\", week)\n added_features.append(prefix + \"Week\")\n # TODO Not adding Elapsed by default. Need to route it through config\n # mask = ~field.isna()\n # df[prefix + \"Elapsed\"] = np.where(\n # mask, field.values.astype(np.int64) // 10 ** 9, None\n # )\n # added_features.append(prefix + \"Elapsed\")\n if drop:\n df.drop(field_name, axis=1, inplace=True)\n\n # Removing features woth zero variations\n # for col in added_features:\n # if len(df[col].unique()) == 1:\n # df.drop(columns=col, inplace=True)\n # added_features.remove(col)\n return df, added_features",
"def get_features(self, df):\n return df.drop(df.columns[self.target_col], axis=1)",
"def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset",
"def month_lag_distribution(source_df, field=\"month_lag\", path=path.path, nrows=None):\n _log.info(\"Creating features from {}\".format(field))\n prefix = source_df.split(\"_\")[0]\n source_df = \"{}/{}\".format(path, source_df)\n\n _log.info(\"Reading from {}\".format(source_df))\n try:\n df = pd.read_csv(source_df, usecols=[\"card_id\", field], nrows=nrows)\n _log.info(\"Successfully read from {}\".format(source_df))\n except Exception as e:\n _log.exception(e)\n\n _log.info(\"Computing distribution of month lag\")\n func_to_be_applied = [min, max, pd.Series.nunique]\n func_to_be_applied_dummy = [max, np.mean]\n rename_dict = create_rename_dict(prefix, field, func_to_be_applied)\n rename_dict_dummy = create_rename_dict(prefix, \"dummy\", func_to_be_applied_dummy)\n\n df[\"dummy\"] = 1\n df_features = df.groupby(\"card_id\").agg({field:func_to_be_applied}).reset_index()\n df_features = pd.concat([pd.DataFrame(df_features[\"card_id\"]), df_features[field]], axis=1, sort=False)\n\n _log.info(\"Renaming columns: {}\".format(rename_dict))\n df_features.rename(columns=rename_dict, inplace=True)\n\n _log.info(\"Computing time in month between transactions\")\n df_freq = (df.groupby([\"card_id\", field]).agg({\"dummy\": np.sum}).reset_index().groupby(\"card_id\")\n .agg({\"dummy\": func_to_be_applied_dummy}).reset_index())\n df_freq = pd.concat([pd.DataFrame(df_freq[\"card_id\"]), df_freq[\"dummy\"]], axis=1, sort=False)\n df_freq.rename(columns=rename_dict_dummy, inplace=True)\n\n _log.info(\"Creating final df\")\n df_features = df_features.merge(df_freq, how=\"inner\", on=\"card_id\")\n return df_features",
"def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n #-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return",
"def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) & (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? 
{df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end",
"def time_delayed_feature(df, feature, tdelta, aggby=np.median, group=None,\n roll=1):\n df['_date'] = pd.to_datetime(df['time_created'].apply(lambda x: x.date()))\n groups = ['_date']\n if group:\n groups.append(group)\n all_group = df[group].unique()\n agg_feature = df[groups + [feature]].groupby(groups)[feature].agg(aggby)\n # Impute first values with value at min date\n min_date = agg_feature.index.min()\n if group:\n min_date = min_date[0]\n for date in [min_date - timedelta(days=x) for x in range(1,tdelta.days+1)]:\n if group:\n for g in all_group:\n try:\n agg_feature[(date,g)] = agg_feature[min_date][g]\n except KeyError:\n agg_feature[(date,g)] = 0\n else:\n agg_feature[date] = agg_feature[min_date]\n # Apply rolling sum up until t-tdelta if roll>1\n if roll>1 and group:\n def rollsum(g):\n old_idx = g.index.copy()\n tmp = g.reset_index().set_index('_date')\n idx = tmp.index.copy()\n result = pd.rolling_sum(tmp.resample(\"1D\").sum().fillna(0), roll, min_periods=0).ix[idx]\n result.set_index(old_idx, inplace=True)\n return result\n agg_feature = agg_feature.groupby(level=group).apply(rollsum).sum(axis=1)\n if roll>1 and not group:\n agg_feature = pd.rolling_sum(agg_feature, roll, min_periods=0)\n # Helper function to return 0 if feature not defined for delay\n # (e.g. if specific fire station, code type had no feature on that date)\n def old_feature(row):\n if group:\n try:\n return agg_feature[row['_date']-tdelta][row[group]]\n except:\n return 0\n else:\n try:\n return agg_feature[row['_date']-tdelta]\n except:\n return 0\n\n delayed_feature = df[groups].apply(old_feature, axis=1)\n # Drop helper data\n df.drop('_date', axis=1)\n return delayed_feature",
"def calculate_timebase_features(self, X: pd.DataFrame) -> pd.DataFrame:\n X = self._add_lagged_features(X, [1, 3, 7, 14, 21, 365])\n\n X = self._add_rolling(X, 'mean', [5, 50])\n X = self._add_rolling(X, 'min', [5, 50])\n X = self._add_rolling(X, 'max', [5, 50])\n\n return X",
"def split_df(df): \n df = df.dropna()\n df['Date'] = pd.to_datetime(df.index)\n df_train = df[(df.Date>=_start)&(df.Date<=_mid)]\n df_test = df[(df.Date>_mid)&(df.Date<=_stop)]\n df_oos = df[(df.Date>_stop)&(df.Date <= _last)]\n df_train = df_train.drop(['Date'],axis=1)\n df_test = df_test.drop(['Date'],axis=1)\n df_oos = df_oos.drop(['Date'],axis=1)\n return(df_train, df_test,df_oos)",
"def get_data_with_lagged_feature(input_df, raw_values, predict_length, lagged_count=None):\n if lagged_count is None:\n lagged_count = [60, 90, 120, 150]\n cropped_values = np.ndarray.copy(raw_values)\n time_feature = np.stack(make_time_features_with_encoding(input_df.index), axis=-1)\n\n # we should remove real values that will be predicted\n cropped_values[-predict_length:] = 0\n\n start_date, end_date = input_df.index[0], input_df.index[-1]\n lagged_index = np.stack(lag_indexes(start_date, end_date, lagged_count), -1)\n lagged_values = get_lagged_time(cropped_values, lagged_index)\n\n all_features = np.concatenate((time_feature, lagged_values), axis=-1)\n\n return split_time_series_data(all_features, raw_values, predict_length)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ceilings datetime dt to interval num_minutes, then returns the unix timestamp. | def rounded_unix_timestamp(dt, num_minutes=15):
nsecs = dt.minute * 60 + dt.second + dt.microsecond * 1e-6
delta = math.ceil(nsecs / (60 * num_minutes)) * (60 * num_minutes) - nsecs
return int((dt + timedelta(seconds=delta)).timestamp()) | [
"def dt_to_unix_time_ms(cls, dt):\n epoch = datetime.utcfromtimestamp(0)\n return int((dt - epoch).total_seconds() * 1000)",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def to_min(dt: datetime) -> int:\n # get an integer time slot from a datetime\n return int(\n (dt - dt.utcfromtimestamp(0)).total_seconds() / 60\n )",
"def _get_milleseconds(self):\n return int(round(time.time() * 1000))",
"def minutes_in(sec):\r\n return int((sec - (hours_in(sec)*3600))//60)",
"def to_minutes(delta):\n return int(math.ceil(delta.total_seconds() / 60))",
"def round_time(dt=None, roundTo=60): # IGNORE:W0621\n\n if dt is None:\n dt = datetime.now()\n\n dt = np.asarray(dt, dtype='datetime64[s]').reshape(-1)\n\n for li in range(len(dt)):\n date = dt[li].astype(object)\n seconds = (date - date.min).seconds\n\n # // is a floor division, not a comment on following line:\n rounding = (seconds + roundTo / 2) // roundTo * roundTo\n\n dt[li] = date + timedelta(0, rounding - seconds, -date.microsecond)\n\n return len(dt) == 1 and dt[0].astype(object) or dt",
"def get_closest_minute(t):\n ts = dt.datetime.utcfromtimestamp(t/1000)\n s = ts.second\n if s < 30:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute)\n else:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute) + dt.timedelta(minutes=1)",
"def unix_time_millis(dt):\n return long((dt - epoch).total_seconds() * 1000)",
"def dt_to_unix_time_sec(cls, dt):\n epoch = datetime.utcfromtimestamp(0)\n return int((dt - epoch).total_seconds())",
"def bin_of_day(dt, bin_size_mins=30):\n if dt is None:\n dt = datetime.now()\n\n # Number of minutes from beginning of day\n minutes = (dt.hour * 60) + dt.minute\n # Convert minutes to bin\n time_bin = math.trunc(minutes / bin_size_mins)\n return time_bin",
"def timedelta_as_minutes(td):\r\n return timedelta_as_seconds(td) / 60",
"def calculate_minutes(time):\n return int(time / 60)",
"def timeToMinutes(timestamp):\n if len(timestamp) == 5: \n return int(timestamp[0])*600 + int(timestamp[1])*60 + int(timestamp[3])*10 + int(timestamp[4])\n return None",
"def get_minutes(self, datetime):\n return datetime.hour*60.0+datetime.minute+datetime.second/60",
"def interval_seconds():\n return int(interval_to_milliseconds(interval())/1000)",
"def floor_time(self, ts):\n return datetime.datetime.fromtimestamp(\n int(ts.timestamp()) // self.interval * self.interval\n )",
"def truncate_to_minutes(timestamp):\n return Timestamp(datetime(timestamp.year, timestamp.month, timestamp.day, timestamp.hour, \n timestamp.minute))",
"def min_to_datetime(minutes: int) -> datetime:\n return datetime.utcfromtimestamp(minutes * 60)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return current sample rate in Sa/s | def sample_rate(self):
return self.query_float('ENTER Current Sample Rate (Sa/s)') | [
"def sample_rate(self) -> float:\n return self._sample_rate",
"def sample_rate(self):\r\n return self.config.sample_rate",
"def get_sample_rate(self):\n time_diffs = np.diff(self.get_time()).mean()\n return 1000/time_diffs",
"def sampling_rate(self):\n return self.librarycall('get_sampleRate')",
"def sample_rate(self):\n self.update()\n try:\n rate = sum(\n sims['sample_rate'] for sims in self._status['simulators'])\n return rate\n except (TypeError, KeyError):\n log.info('Unable to retrieve sample rate from BRAIN ')\n return 0",
"def sampling_rate(self):\n return self.track.sampling_rate",
"def get_samp_rate(self):\n return _uhd_swig.usrp_sink_get_samp_rate(self)",
"def get_samp_rate(self):\n return _uhd_swig.usrp_source_get_samp_rate(self)",
"def get_samplerate(self):\n\t\treturn _PM_UPDATE_RATE / self.output_decimation",
"def _get_sample_rate(self):\n if hw_online_mode:\n # This is an ivi driver call:\n self.sample_rate = self.scope.acquisition.sample_rate\n print(f\"Sample rate is: {self.sample_rate}\")\n self._run_cbX_config()",
"def get_samp_rate(self):\n return _uhd_swig.usrp_sink_sptr_get_samp_rate(self)",
"def sample_interval(self):\n\n if self.sample_rate != 0:\n return 1.0 / self.sample_rate\n return 0.0",
"def sample_rate(self):\n if self.has_data():\n try:\n return round(\n 1.0\n / np.float64(\n (\n np.median(\n np.diff(self.dataset.coords[\"time\"].to_index())\n / np.timedelta64(1, \"s\")\n )\n )\n ),\n 0,\n )\n except AttributeError:\n self.logger.warning(\n \"Something weird happend with xarray time indexing\"\n )\n\n raise ValueError(\n \"Something weird happend with xarray time indexing\"\n )\n return self.run_metadata.sample_rate",
"def spdystreamsrate(self) :\n\t\ttry :\n\t\t\treturn self._spdystreamsrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def analysis_sample_rate(self):\n return self.h5.root.analysis.songs.cols.analysis_sample_rate[self.songidx]",
"def get_channel_sampling_rate(self)->float:\n return self.__sampling_rate",
"def speech_rate(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n return int(z1[2])",
"def input_data_sample_rate(self):\n return self._input_data_sample_rate",
"def i2sec(i):\n return float(i / sample_rate)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
assert unexpected_content has not been written to stdout | def assertStdoutDoesNotContain(self, unexpected_content):
if type(unexpected_content) is not types.ListType:
unexpected_content = [ unexpected_content ]
stdout_message = sys.stdout.getvalue()
for the_text in unexpected_content:
self.assertNotIn(the_text, stdout_message,('Stdout "%s" contains text "%s"' % (stdout_message, the_text))) | [
"def check_cot_output(self, expected):\n sys.stdout = StringIO.StringIO()\n output = None\n try:\n self.instance.run()\n except (TypeError, ValueError, SyntaxError, LookupError):\n self.fail(traceback.format_exc())\n finally:\n output = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n self.maxDiff = None\n self.assertMultiLineEqual(expected.strip(), output.strip())",
"def test_without_filename(self):\n with smart_open() as fh:\n self.assertIs(fh, sys.stdout)",
"def assertStdout(self, output):\n self.failUnlessEqual(self.stdout(), output)",
"def test_unexpectedError(self):\r\n err = StringIO()\r\n reporter = Reporter(None, err)\r\n reporter.unexpectedError('source.py', 'error message')\r\n self.assertEqual('source.py: error message\\n', err.getvalue())",
"def test_print_wrapper():\n temp = sys.stdout\n f = open(\"log.txt\", \"a+\")\n f.close()\n\n sys.stdout = open(\"log.txt\", \"r\")\n\n # no IOError exception raised\n print_wrapper(\"testing123\")\n print_wrapper(\"another line\")\n\n with pytest.raises(IOError):\n print \"expect to failed\"\n\n sys.stdout.close()\n sys.stdout = temp",
"def assert_output(self, parser_args, expected_output):\n c = count_nginx_log_frequency(\n parser_args.file,\n parser_args.segment,\n NGINX_ACCESS_LOG_REGEX\n )\n saved_stdout = sys.stdout\n try:\n out = StringIO()\n sys.stdout = out\n print_report(\n c,\n parser_args.segment,\n parser_args.limit,\n parser_args.file\n )\n output = out.getvalue().strip()\n assert output == expected_output\n finally:\n sys.stdout = saved_stdout",
"def test_ignore_capture():\n\n sys.stdout.write('Print to stdout')\n sys.stderr.write('Print to stderr')\n\n assert True",
"def test_output_interception(self):\n expected_output = 'testing, 1, 2, 3 ..'\n actual_output = capture(['echo', expected_output])\n assert actual_output.strip() == expected_output.strip()",
"def test_capture_stdout():\n\n sys.stdout.write('Print to stdout')\n\n assert False",
"def test_stdout_pattern(f, result):\n if not os.path.exists(f):\n return\n\n expected = open(f, encoding=\"utf-8\").read()\n\n # curl debug logs are too dependent on the context, so we filter\n # them and not take them into account for testing differences.\n expected = remove_curl_debug_lines(expected)\n expected_lines = expected.split(\"\\n\")\n expected_pattern_lines = [parse_pattern(line) for line in expected_lines]\n\n actual = decode_string(result.stdout)\n actual = remove_curl_debug_lines(actual)\n actual_lines = re.split(r\"\\r?\\n\", actual)\n\n if len(actual_lines) != len(expected_pattern_lines):\n print(\">>> error in stdout / mismatch in number of lines\")\n print(\n f\"actual: {len(actual_lines)} lines\\nexpected: {len(expected_pattern_lines)} lines\"\n )\n print(f\"actual <{actual}>\")\n print(\"# Actual lines\")\n for i, line in enumerate(actual_lines):\n print(\"%2d: %s\" % (i, line))\n print(\"# Expected lines\")\n for i, line in enumerate(expected_lines):\n print(\"%2d: %s\" % (i, line))\n print(\"# Expected Pattern lines\")\n for i, line in enumerate(expected_pattern_lines):\n print(\"%2d: %s\" % (i, line))\n\n sys.exit(1)\n for i in range(len(expected_pattern_lines)):\n if not re.match(expected_pattern_lines[i], actual_lines[i]):\n print(f\">>> error in stdout in line {i+1}\")\n print(f\"actual: <{actual_lines[i]}>\")\n print(\n f\"expected: <{expected_lines[i]}> (translated to regex <{expected_pattern_lines[i]}>)\"\n )\n sys.exit(1)",
"def testStdoutAndStderr(self):\n with self.OutputCapturer():\n print('foo')\n print('bar', file=sys.stderr)\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)",
"def assertStderr(self, output):\n self.failUnlessEqual(self.stderr(), output)",
"def notices_decorator_assert_message_in_stdout(\n captured, messages: Sequence[str], dummy_mesg: Optional[str] = None, not_in: bool = False\n):\n assert captured.err == \"\"\n assert dummy_mesg in captured.out\n\n for mesg in messages:\n if not_in:\n assert mesg not in captured.out\n else:\n assert mesg in captured.out",
"def check_allowed_stdout(self, test: str) -> bool:\n return test[:4] in ['t003']",
"def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF\")\n out = f.getvalue()\n self.assertTrue(len(out) == 1)\n self.assertEqual(\"\\n\", out)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF fake\")\n msj = f.getvalue().strip()\n self.assertFalse(len(msj) == 1)\n self.assertEqual(\"\", msj)",
"def _outcome_(connection: ServerConnection, outcome: str) -> None:\n line = _read_line(connection)\n\n if line != outcome:\n print(line)\n raise ServerError()",
"def test_no_body_invalid(self):\n\n self.unwrapper.message = remove_line(self.unwrapper.message, \"\\n\")\n\n self.assertRaises(StopIteration, self.unwrapper.next)",
"def test_handle_print_rich_exception(self):\n\n with io.StringIO() as buf:\n # Capture stdout logs (rich logs to stdout)\n with contextlib.redirect_stdout(buf):\n _print_rich_exception(Exception(\"boom!\"))\n # Capture the stdout output\n captured_output = buf.getvalue()\n\n assert \"Exception:\" in captured_output\n assert \"boom!\" in captured_output",
"def assertNoLogs(self):\n before_logs = self.captured_logs.output.copy()\n try:\n yield\n finally:\n after_logs = self.captured_logs.output\n\n # If any new messages logged, then fail the test.\n if len(after_logs) > len(before_logs):\n msg = 'The follow messages were unexpectedly logged:\\n '\n msg += '\\n '.join(after_logs[len(before_logs):])\n raise self.failureException(msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap. | def render_image(self, rgbobj, dst_x, dst_y):
self.logger.debug("redraw pixmap=%s" % (self.pixmap))
if self.pixmap is None:
return
self.logger.debug("drawing to pixmap")
# Prepare array for rendering
arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8)
(height, width) = arr.shape[:2]
return self._render_offscreen(self.pixmap, arr, dst_x, dst_y,
width, height) | [
"def draw(self, surface):\r\n surface.blit(self.image, self.rect)",
"def blit(self):\n self.screen.blit(self.image, self.rect)",
"def draw_image_processing(self, screen):\n screen.blit(self.get_image_processing(), self.get_image_processing_rect())",
"def draw_inkblot(self):\n self.screen.blit(self.image,self.rect)",
"def render(self, surface):\r\n surface.blit(self._image, self._rect)",
"def render_canvas(self):\r\n self._display.image(self._image)\r\n self._display.display()",
"def render( self, screen, dest ):\n # We're gonna draw a decorative border to surround the provided area.\n # Step one: Determine the size of our box. Both dimensions should be \n # a multiple of TEX_WIDTH. \n\n if self.border == None:\n self.border = image.Image( self.border_name, self.border_width, self.border_width )\n if self.tex == None:\n self.tex = image.Image( self.tex_name, self.tex_width, self.tex_width )\n\n # W32 and H32 will store the number of columns/rows. \n W32 = ( dest.width + self.border_width ) / self.tex_width + 1\n H32 = ( dest.height + self.border_width ) / self.tex_width + 1\n\n # X0 and Y0 will store the upper left corner of the box.\n X0 = dest.left - ( ( ( W32 * self.tex_width ) - dest.width ) / 2 )\n Y0 = dest.top - ( ( ( H32 * self.tex_width ) - dest.height ) / 2 )\n\n # Draw the backdrop.\n for X in range( W32 ):\n Dest_X = X0 + X * self.tex_width\n for Y in range( H32 ):\n Dest_Y = Y0 + Y * self.tex_width\n self.tex.render( screen , ( Dest_X , Dest_Y ) )\n\n self.border.render( screen , ( X0 - self.border_width / 2 , Y0 - self.border_width / 2 ) , self.tl )\n self.border.render( screen , ( X0 - self.border_width / 2 , Y0 - self.border_width / 2 + H32 * self.tex_width ) , self.bl )\n self.border.render( screen , ( X0 - self.border_width / 2 + W32 * self.tex_width , Y0 - self.border_width / 2 ) , self.tr )\n self.border.render( screen , ( X0 - self.border_width / 2 + W32 * self.tex_width , Y0 - self.border_width / 2 + H32 * self.tex_width ) , self.br )\n\n for X in range( 1 , W32 * 2 ):\n Dest_X = X0 + X * self.border_width - self.border_width / 2\n Dest_Y = Y0 - self.border_width / 2\n self.border.render( screen , ( Dest_X , Dest_Y ) , self.t )\n Dest_Y = Y0 + H32 * self.tex_width - self.border_width / 2\n self.border.render( screen , ( Dest_X , Dest_Y ) , self.b )\n for Y in range( 1 ,H32 * 2 ):\n Dest_Y = Y0 + Y * self.border_width - self.border_width / 2\n Dest_X = X0 - self.border_width / 2\n self.border.render( screen , ( Dest_X , Dest_Y ) , self.l )\n Dest_X = X0 + W32 * self.tex_width - self.border_width / 2\n self.border.render( screen , ( Dest_X , Dest_Y ) , self.r )",
"def export_frame(self,i,width=settings.IMAGE_WIDTH,height=settings.IMAGE_HEIGHT):\n img = cairo.ImageSurface(cairo.FORMAT_ARGB32,width,height)\n context = cairo.Context(img)\n self.render(context,i)\n context.paint()\n return img",
"def draw_offscreen(context):\n offscreen = SprytileGui.offscreen\n target_img = SprytileGui.texture_grid\n tex_size = SprytileGui.tex_size\n\n offscreen.bind()\n glClear(GL_COLOR_BUFFER_BIT)\n glDisable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0, tex_size[0], 0, tex_size[1])\n\n def draw_full_quad():\n texco = [(0, 0), (0, 1), (1, 1), (1, 0)]\n verco = [(0, 0), (0, tex_size[1]), (tex_size[0], tex_size[1]), (tex_size[0], 0)]\n glBegin(bgl.GL_QUADS)\n for i in range(4):\n glTexCoord2f(texco[i][0], texco[i][1])\n glVertex2f(verco[i][0], verco[i][1])\n glEnd()\n\n glColor4f(0.0, 0.0, 0.0, 0.5)\n draw_full_quad()\n\n if target_img is not None:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n target_img.gl_load(0, GL_NEAREST, GL_NEAREST)\n glBindTexture(GL_TEXTURE_2D, target_img.bindcode[0])\n # We need to backup and restore the MAG_FILTER to avoid messing up the Blender viewport\n old_mag_filter = Buffer(GL_INT, 1)\n glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glEnable(GL_TEXTURE_2D)\n draw_full_quad()\n glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n\n # Translate the gl context by grid matrix\n grid_matrix = sprytile_utils.get_grid_matrix(SprytileGui.loaded_grid)\n matrix_vals = [grid_matrix[j][i] for i in range(4) for j in range(4)]\n grid_buff = bgl.Buffer(bgl.GL_FLOAT, 16, matrix_vals)\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glLoadMatrixf(grid_buff)\n\n glDisable(GL_TEXTURE_2D)\n\n # Get data for drawing additional overlays\n grid_size = SprytileGui.loaded_grid.grid\n padding = SprytileGui.loaded_grid.padding\n margin = SprytileGui.loaded_grid.margin\n curr_sel = SprytileGui.loaded_grid.tile_selection\n is_pixel_grid = sprytile_utils.grid_is_single_pixel(SprytileGui.loaded_grid)\n is_use_mouse = context.scene.sprytile_ui.use_mouse\n is_selecting = SprytileGui.is_selecting\n\n glLineWidth(1)\n\n # Draw box for currently selected tile(s)\n # Pixel grid selection is drawn in draw_tile_select_ui\n sprytile_data = context.scene.sprytile_data\n is_not_base_layer = sprytile_data.work_layer != \"BASE\"\n draw_outline = sprytile_data.outline_preview or is_not_base_layer\n if draw_outline and is_selecting is False and not is_pixel_grid:\n if is_not_base_layer:\n glColor4f(0.98, 0.94, 0.12, 1.0)\n elif SprytileGui.is_moving:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n else:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n curr_sel_min, curr_sel_max = SprytileGui.get_sel_bounds(\n grid_size, padding, margin,\n curr_sel[0], curr_sel[1],\n curr_sel[2], curr_sel[3]\n )\n SprytileGui.draw_selection(curr_sel_min, curr_sel_max)\n\n # Inside gui, draw appropriate selection for under mouse\n if is_use_mouse and is_selecting is False and SprytileGui.cursor_grid_pos is not None:\n\n cursor_pos = SprytileGui.cursor_grid_pos\n # In pixel grid, draw cross hair\n if is_pixel_grid and SprytileGui.is_moving is False:\n glColor4f(1.0, 1.0, 1.0, 0.5)\n glBegin(GL_LINE_STRIP)\n glVertex2i(0, int(cursor_pos.y + 1))\n glVertex2i(tex_size[0], int(cursor_pos.y + 1))\n glEnd()\n\n glBegin(GL_LINE_STRIP)\n glVertex2i(int(cursor_pos.x + 1), 0)\n glVertex2i(int(cursor_pos.x + 1), tex_size[1])\n glEnd()\n # Draw box around selection\n elif SprytileGui.is_moving is False:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n cursor_min, cursor_max = SprytileGui.get_sel_bounds(grid_size, padding, margin,\n int(cursor_pos.x), int(cursor_pos.y),)\n 
SprytileGui.draw_selection(cursor_min, cursor_max)\n\n glPopMatrix()\n offscreen.unbind()",
"def CreateSubBitmap(*args, **kwargs):\n return _gdi_.GraphicsRenderer_CreateSubBitmap(*args, **kwargs)",
"def _show_rgb(self):\n R, G, B = self._rgb_frames()\n image = numpy.dstack((R, G, B))\n imageItem = self.parent.image.getImageItem()\n imageItem.updateImage(image)",
"def draw(self, screen):\n for pair in self.pipes:\n for e in pair:\n screen.blit(e.img, (e.x, e.y))",
"def draw(self, screen: pygame.Surface) -> None:\n screen.blit(PIPE_SPRITES[PipesEnum.Bottom], (int(self.x), self.bottom_pipe_y))\n screen.blit(PIPE_SPRITES[PipesEnum.Top], (int(self.x), self.top_pipe_y))",
"def draw(self):\n self.write_image()\n self.update()",
"def draw(self):\n return ImageDraw.Draw(self.buffer)",
"def draw(self):\n destination = self.getCoordinate(self.position)\n self.rect = self.main.screen.blit(self.pawn, destination)\n\n pass",
"def render(self):\n dirty_rects = self.all_sprites.draw(self.screen)\n pg.display.update(dirty_rects)",
"def blit_source_image(output, template, image, panel):\n img = Image.open(image)\n screen = TEMPLATES[template]['screen']\n factor = float(screen[0]) / float(img.size[0])\n dimensions = [int(i * factor) for i in img.size]\n if panel:\n dimensions[1] -= TEMPLATES[template]['panel']\n img = img.resize(dimensions, Image.ANTIALIAS)\n img = img.crop([0, 0] + [min(*i) for i in zip(dimensions, screen)])\n offset = list(TEMPLATES[template]['offset'])\n if panel:\n offset[1] += TEMPLATES[template]['panel']\n output.paste(img, tuple(offset))",
"def _show_rgb(self):\n R, G, B = self._get_frames()\n image = numpy.dstack((R, G, B))\n self.image_view.setImage(image)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called when a mouse button is pressed in the widget. Adjust method signature as appropriate for callback. | def button_press_event(self, widget, event):
x, y = event.x, event.y
# x, y = coordinates where the button was pressed
self.last_win_x, self.last_win_y = x, y
button = 0
# Prepare a button mask with bits set as follows:
# left button: 0x1
# middle button: 0x2
# right button: 0x4
# Others can be added as appropriate
self.logger.debug("button down event at %dx%d, button=%x" % (x, y, button))
data_x, data_y = self.check_cursor_location()
return self.make_ui_callback('button-press', button, data_x, data_y) | [
"def on_mouse_press(self, x, y, button):\n\n pass",
"def mouse_press(self, btn, x, y, modifiers):",
"def on_mouse_press(self, x, y, button, modifiers):\n pass",
"def _press(self, event):",
"def on_mouse_release(self, x, y, button):\n pass",
"def onMousePressed(self, event):\n (x,y) = (event.x, event.y)\n try:\n for key in self.dragAndDrops:\n dragAndDrop = self.dragAndDrops[key]\n if (dragAndDrop.isClickInsideBox(x,y)): \n self.currentlyDragging = key\n for key in self.buttons:\n button = self.buttons[key]\n if (button.isClickInsideBox(x,y)): \n button.clicked()\n except Exception,e: print str(e)",
"def on_clicked(self, widget):\n pass",
"def widgetClicked(self):\n pass",
"def button_clicked(self, widget, data=None):\n print \"button %s clicked\" % data",
"def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)",
"def handle_click(self, mouseX, mouseY):\n pass",
"def clicked(self):\n self.callback()",
"def clickOnWidget(self,button, (x,y),obj=None):\n \n if self.checkClick((x,y)) and button==MOUSE_LEFT:\n gui.setFocus(self.index)\n self.activate()\n return None",
"def mouse_press_event(self, x, y, button):\n if button == 1:\n print(\"Left mouse button pressed @\", x, y)\n if button == 2:\n print(\"Right mouse button pressed @\", x, y)",
"def input(self, event: pygame.event) -> None:\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.user_clicked = True",
"def on_mouse_up(self, event):\n if event.button == 1:\n for obj in self._clicked.values():\n obj.on_mouse_up(event)\n self._clicked.clear()",
"def mousePressEvent(self, event): \n if event.type() == qtc.QEvent.MouseButtonPress:\n if event.button() == qtc.Qt.LeftButton:\n self.leftClicked.emit()\n\n elif event.button() == qtc.Qt.MidButton:\n self.middleClicked.emit()\n\n else:\n super(TagWidget, self).mousePressEvent(event)",
"def mousePressEvent(self, mouse_event):\r\n return",
"def set_on_mouse_pressed(self, callback):\n self.on_mouse_pressed = callback"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called when a drop (drag/drop) event happens in the widget. Adjust method signature as appropriate for callback. | def drop_event(self, widget, event):
# make a call back with a list of URLs that were dropped
#self.logger.debug("dropped filename(s): %s" % (str(paths)))
#self.make_ui_callback('drag-drop', paths)
raise NotImplementedError | [
"def hook_drop(self):\n widget = self.widget\n widget.setAcceptDrops(True)\n widget.dragEnterEvent = self.dragEnterEvent\n widget.dragMoveEvent = self.dragMoveEvent\n widget.dragLeaveEvent = self.dragLeaveEvent\n widget.dropEvent = self.dropEvent",
"def dropEvent(self, e):\n\n\t\tif e.mimeData().hasUrls:\n\t\t\te.setDropAction(QtCore.Qt.CopyAction)\n\t\t\te.accept()\n\t\t\tfor url in e.mimeData().urls():\n\t\t\t\tfname = str(url.toLocalFile())\n\n\t\t\t# print(\"Dropped '%s' on to widget.\" % fname)\n\t\t\tif os.path.isfile(fname):\n\t\t\t\tif os.path.splitext(fname)[1] in _allowed_formats:\n\t\t\t\t\tself.updateThumbnail(fname)\n\t\telse:\n\t\t\te.ignore()",
"def __drop_cb(self, k):\n self.__systems.layout.on_drop()\n waste_tableau = common.TableArea.WASTE, common.TableArea.TABLEAU\n if self.__state.drag_info.start_area in waste_tableau:\n if not self.__drop_foundation(k):\n self.__drop_tableau(k)\n elif self.__state.drag_info.start_area == common.TableArea.FOUNDATION:\n self.__drop_tableau(k)\n self.__state.refresh_next_frame = 1\n self.__state.valid_drop = True",
"def on_drop(self):\n print(\"You have dropped\", self.name)",
"def dropEvent(self, event):\n\n # Get the id color to drop the items into\n drop_id_color = self.itemAt(event.pos())\n drop_id_color = self.invisibleRootItem() \\\n if drop_id_color is None else drop_id_color\n\n # If the drop position is not valid we pass\n if drop_id_color is None:\n event.ignore()\n return\n\n # If the drop position is not an id color item we pass\n if drop_id_color.data(0, QtCore.Qt.UserRole) != \"color\":\n event.ignore()\n return\n\n # Get the drop items - the selected tree items\n drop_items = [x for x in self.selectedItems()\n if x.data(0, QtCore.Qt.UserRole) == \"object\"] or None\n\n # If not items selected we pass\n if drop_items is None:\n event.ignore()\n return\n\n # Drop the items into the new tree parent\n self._drop_tree_items(drop_items, drop_id_color)\n\n event.accept()\n\n return None",
"def dropEvent(self, event):\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n file_path = event.mimeData().urls()[0].toLocalFile()\n self.set_image(file_path)\n self.folderLocation.setText(file_path)\n \n event.accept()\n else:\n event.ignore()",
"def dropEvent(self, e):\n if e.mimeData().hasUrls:\n e.setDropAction(QtCore.Qt.CopyAction)\n e.accept()\n # Workaround for OSx dragging and dropping\n for url in e.mimeData().urls():\n if op_sys == 'Darwin':\n fname = str(NSURL.URLWithString_(str(url.toString())).filePathURL().path())\n else:\n fname = str(url.toLocalFile())\n\n\n\n\n else:\n e.ignore()",
"def dropEvent(self, event):\n\n position = event.pos()\n widget = event.source()\n\n sublayout = self.layout().itemAt(2)\n index_new = self.get_index(position)\n if index_new >= 0:\n index = min(index_new, sublayout.count() - 1)\n sublayout.insertWidget(index, widget)\n event.setDropAction(Qt.MoveAction)\n event.accept()",
"def onItemDropped(self):\n self.itemDropped.emit()",
"def layout_drag_received(self, widget, context, x, y, selection, targetType, time):\n if targetType == config.data.target_type['annotation']:\n sources=[ self.controller.package.get(uri) for uri in unicode(selection.data, 'utf8').split('\\n') ]\n # We received a drop. Determine the location.\n\n # Correct y value according to scrollbar position\n y += widget.parent.get_vscrollbar().get_adjustment().get_value()\n\n a=[ at\n for (at, p) in self.layer_position.iteritems()\n if (y >= p and y <= p + self.button_height) ]\n if a:\n # Copy/Move to a[0]\n if config.data.os == 'win32':\n # Control/Shift mods for DND is broken on win32. Force ACTION_ASK.\n ac=gtk.gdk.ACTION_ASK\n else:\n ac=context.actions\n self.move_or_copy_annotations(sources, a[0], position=self.pixel2unit(self.adjustment.value + x, absolute=True), action=ac)\n else:\n # Maybe we should propose to create a new annotation-type ?\n # Create a type\n dest=self.create_annotation_type()\n if dest is None:\n return True\n self.move_or_copy_annotations(sources, dest, position=self.pixel2unit(self.adjustment.value + x, absolute=True), action=context.actions)\n return True\n elif targetType == config.data.target_type['annotation-type']:\n source=self.controller.package.get(unicode(selection.data, 'utf8'))\n # We received a drop. Determine the location.\n a=[ at\n for (at, p) in self.layer_position.iteritems()\n if (y >= p and y <= p + self.button_height) ]\n if a:\n # Copy/Move to a[0]\n if source != a[0]:\n self.copy_annotation_type(source, a[0])\n else:\n # Create an annotation in the type.\n self.create_annotation(position=self.pixel2unit(self.adjustment.value + x, absolute=True),\n type=source,\n duration=self.pixel2unit(context.get_source_widget().get_allocation().width),\n )\n else:\n # Maybe we should propose to create a new annotation-type ?\n # Create a type\n dest=self.create_annotation_type()\n if dest is None:\n return True\n self.copy_annotation_type(source, dest)\n return True\n elif targetType == config.data.target_type['timestamp']:\n # We received a drop. Create an annotation.\n a=[ at\n for (at, p) in self.layer_position.iteritems()\n if (y >= p and y <= p + self.button_height) ]\n if a:\n typ=a[0]\n else:\n typ=self.create_annotation_type()\n if typ is not None:\n data=decode_drop_parameters(selection.data)\n begin=long(data['timestamp'])\n content=data.get('comment', None)\n # Create an annotation of type typ with the timestamp as begin\n self.create_annotation(begin, typ, content=content)\n else:\n print \"Unknown target type for drop: %d\" % targetType\n return False",
"def _on_drop(self, event):\n data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)\n if not data.isNull():\n data_stream = QDataStream(data, QIODevice.ReadOnly)\n parsed = json.loads(data_stream.readString().decode('utf8'))\n\n # Refer to `mime.py` for docs about format\n version = parsed['version']\n if version not in (1, 2):\n raise ValueError(\"Unsupported version of QmxGraph MIME data: {}\".format(version))\n\n x = event.pos().x()\n y = event.pos().y()\n\n if version in (1, 2):\n vertices = parsed.get('vertices', [])\n scale = self.api.get_zoom_scale()\n for v in vertices:\n # place vertices with an offset so their center falls\n # in the event point.\n vertex_x = x + (v['dx'] - v['width'] * 0.5) * scale\n vertex_y = y + (v['dy'] - v['height'] * 0.5) * scale\n self.api.insert_vertex(\n x=vertex_x,\n y=vertex_y,\n width=v['width'],\n height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n if version in (2,):\n decorations = parsed.get('decorations', [])\n for v in decorations:\n self.api.insert_decoration(\n x=x,\n y=y,\n width=v['width'],\n height=v['height'],\n label=v['label'],\n style=v.get('style', None),\n tags=v.get('tags', {}),\n )\n\n event.acceptProposedAction()\n else:\n event.ignore()",
"def dropEvent(self, event):\n\n if event.source() != self:\n board = event.source()\n board.takeItem(board.currentRow())\n event.setDropAction(Qt.MoveAction)\n\n event.setDropAction(Qt.MoveAction)\n super().dropEvent(event)",
"def drop_complete(self, *args):\r\n self.dropping = False\r\n self.animating = False",
"def dropEvent(self, event: QDropEvent):\n if not all([x.isLocalFile() for x in event.mimeData().urls()]):\n QMessageBox().warning(self, \"Load error\", \"Not all files are locally. Cannot load data.\", QMessageBox.Ok)\n paths = [x.toLocalFile() for x in event.mimeData().urls()]\n if self.files_num != -1 and len(paths) > self.files_num:\n QMessageBox.information(self, \"To many files\", \"currently support only drag and drop one file\")\n return\n self.read_drop(paths)",
"def addDropListener(self, callback: 'callable'):\n self.getView().addDropListener(callback)",
"def _OnDragFinished( self, left, top, right, bottom ):\n pass",
"def on_mouse_drag(self, event):\n pass",
"def on_drag_data_received(self, widget, drag_content, x, y, data, info, my_data):\n text = data.get_text()\n if self.debug:\n print \"graph drawer received text: %s\" % text\n if not self.in_slave_column(x, y):\n return\n\n if self.debug:\n print \"in slave area\"\n\n drop_type = None\n sl = []\n if y < (self.prev_height / 2.0):\n if self.debug:\n print \"in peripheral bus\"\n drop_type = SlaveType.PERIPHERAL\n sl = self.boxes[\"pslaves\"]\n else:\n drop_type = SlaveType.MEMORY\n sl = self.boxes[\"mslaves\"]\n\n drop_index = 0\n for slave_box in sl:\n if y < slave_box.y + (slave_box.height / 2.0):\n break\n else:\n drop_index += 1\n\n if self.debug:\n print \"drop location is at %d\" % drop_index\n\n if (self.slave_add_callback is not None):\n self.slave_add_callback(text, drop_type, drop_index)\n\n f = text.rpartition(\"/\")[2]\n tp = \"memory\"\n if drop_type == SlaveType.PERIPHERAL:\n tp = \"peripheral\"\n self.status.print_info(__file__, \"droping slave %s in the %s bus at %d\" % (f, tp, drop_index))",
"def dropMimeData(self, p_int, QMimeData, Qt_DropAction): # real signature unknown; restored from __doc__\r\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets details on currently logged in athlete. | def get_athlete(token):
url = "https://www.strava.com/api/v3/athlete"
params = {'access_token': token}
response = return_json(url, "GET", parameters=params, timeout=10)
return response | [
"def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete",
"def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }",
"def get_account_details(self):\n pass",
"def athletes_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=ATHLETE_TYPE_URI,\n rdf_type_name=ATHLETE_TYPE_NAME, \n kls=Athlete)",
"def get_athlete(name_athlete):\n if not name_athlete:\n return 'nhập tên vận động viên'\n connection = Connection().get_connection()\n try:\n cursor = connection.cursor() # prepare an object cursor\n\n query = \"\"\"\n SELECT * FROM athlete WHERE name='{}';\n \"\"\".format(name_athlete.strip().lower())\n log_debug(query)\n cursor.execute(query)\n result = cursor.fetchall()\n result = answer_athele(name_athlete, result)\n # cursor.close()\n # del cursor\n # connection.close()\n return result\n except MySQLError as ex:\n connection.close()\n log_error(\"Can't get if of athlete with name - {}: {}\".format(name_athlete, ex))\n return []",
"def get_athlete_performance(self, athlete_number):\n query = \"SELECT discipline_name, best_attempt, points FROM achievements WHERE athlete_number = {}\".format(athlete_number)\n performance = self.DB_Handler.get_results(query)\n\n return performance",
"def athletes_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=ATHLETE_TYPE_URI,\n rdf_type_name=ATHLETE_TYPE_NAME, \n kls=Athlete)",
"def getInfo(self):\n self.name, self.description = achievements[self.id]",
"def get_own_details(self):\n\n url_route = \"people/me\"\n\n data = requests.get(self.URL + url_route, headers=self.headers)\n return data",
"def get_teacher(self) -> str :\n return self.teacher",
"def get_teacher():\n\n rows = db.engine.execute(f\"SELECT * FROM teacher_login WHERE loginid = {g.user.loginid}\")\n res = []\n for row in rows:\n res.append(dict(row))\n return jsonify(res)",
"def details(self):\n logging.info(self.user)",
"def get_user_details(self):\n return self._make_request('user.get_user_details').user_details",
"def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return",
"def getDetailsByID(self, server, token, accessoriesID):\n self.uri = '/api/v1/accessories/'\n self.server = server + self.uri + str(accessoriesID)\n headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(token)}\n results = requests.get(self.server, headers=headers) \n return results.content",
"def get(self, alergen_id: int) -> Alergen:\n pass",
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def loan_details(request, id):\n loan = User.objects.get(id=id)\n ctx = {'loan': loan}\n return render(request, 'loan-detail.html', ctx)",
"def _athlete_endpoint(self, athlete):\n return '{host}{athlete}'.format(\n host=self.host,\n athlete=quote_plus(athlete)\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection. | def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp): # pylint: disable=too-many-arguments
url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'
headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'}
payload = [{"_key": athlete_id, "id": athlete_id, "firstname": firstname, "lastname": lastname, "fullname": firstname + " " + lastname, "weight": weight, "ftp": ftp}]
helper.send_http_request(url, "POST", headers=headers, payload=payload, verify=False, use_proxy=False) | [
"def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete",
"def log_strava_event(athlete_id, action):\n strava_event = StravaEvent(athlete_id=athlete_id, action=action, timestamp=datetime.utcnow())\n db.session.add(strava_event)\n db.session.commit()",
"def save(self):\n # bloc_dict = self.bloc.get()\n # for apartment_id in bloc_dict.keys():\n # bloc_dict[apartment_id] = Apartment.to_dictionary(bloc_dict[apartment_id])\n #\n fp = open(\"apartments.json\", \"w\")\n fp.write(json.dumps(Bloc.to_dictionary(self.bloc)))\n fp.close()",
"def save_data(scrapper_data):\n print(f'Persisting {len(scrapper_data)} results')\n\n restaurant_persistor, city_persistor = RestaurantHelper(), CityHelper()\n\n city_persistor.insert(scrapper_data[0].city)\n city_persistor.commit()\n\n for restaurant in scrapper_data:\n restaurant_persistor.insert(restaurant)\n\n restaurant_persistor.commit()\n\n print('Done.')",
"def save_to_terralego(self):\n tags = self.terralego_tags and json.loads(self.terralego_tags) or None\n tags = self._update_tags_with_model(tags)\n self.terralego_tags = json.dumps(tags) # Save tags in case of error before the update_from_terralego_data\n if self.terralego_id is None:\n data = geodirectory.create_entry(self.terralego_geometry, tags)\n else:\n data = geodirectory.update_entry(self.terralego_id, self.terralego_geometry, tags)\n self.update_from_terralego_data(data)",
"def store_in_db(offers):\n with open(OFFERS_FILE, 'w', encoding='utf8') as f:\n json.dump(offers, f, ensure_ascii=False, indent=4)",
"def test_athlete_model(self):\n weightlifting = WeightLifting()\n weightlifting_pk = weightlifting.pk\n weightlifting.movement = 'movement'\n weightlifting.rep_scheme = 'rep_scheme'\n weightlifting.description = 'description'\n weightlifting.save()\n weight = WeightLifting.objects.create(movement='movement', rep_scheme='rep_scheme',\n description='description')\n athlete = Athlete.objects.create(favorite_movement=weight, athlete=self.user)\n self.assertEqual(athlete.workouts_completed, 0)\n self.assertEqual(athlete.personal_records, 0)\n self.assertEqual(athlete.favorite_movement, weight)\n athlete.workouts_completed = 9\n athlete.personal_records = 101\n self.assertTrue(athlete.workouts_completed == 9)\n self.assertTrue(athlete.personal_records == 101)",
"def store(self, details):",
"def save_to_kvstore(self, name, entries, stats):\n collection_name = name\n collection = self.service.kvstore[collection_name]\n for entry in entries:\n collection.data.insert(json.dumps(entry))",
"def add_raw_athlete(conn, athlete_id, raw_text):\n statement = \"INSERT INTO %s VALUES (%i, '%s', DEFAULT)\" % \\\n (TABLE_SCRAPING, athlete_id, mdb.escape_string(raw_text))\n cur = conn.cursor()\n cur.execute(statement)\n return",
"def store_in_file(self):\n with open(dogs_file, \"a\") as store:\n store.writelines(self.dog_name + \"\\n\")\n print(\"\\tYou store \" + self.dog_name + \" in \" + dogs_file)",
"def store(self):\n\n # Initialise the datastore\n try:\n # Open existing\n store = shelve.open('lesson/store', 'w')\n except Exception:\n # Create new\n # Only if doesn't exist\n store = shelve.open('lesson/store', 'n')\n\n # Store the lesson\n store[self._id] = self\n store.close()",
"def store_anagrams(filename, anagram_map):\n shelf = shelve.open(filename, 'c')\n\n for word, word_list in anagram_map.items():\n shelf[word] = word_list\n\n shelf.close()",
"def store_all_to_database(self, session):\n\n description = 'Established in 1974, JSM is a family-owned provider of quality apartments. We offer a variety of units from studios to five bedrooms with every location benefitting from our award winning amenities, responsive 24 hour maintenance, and friendly property management staff. JSM Development began in Champaign, IL, and manages roughly 1,500 apartments and 450,000 sq/ft of commercial space. JSM has been a major contributor to the development of Campustown in Champaign and the East Campus area in Urbana at the University of Illinois. These popular locations are now home to major national retailers such as Urban Outfitters, Chipotle, Panera, Cold Stone Creamery, and Noodles & Co.'\n\n # Insert a JSM company instance into the database\n current_company = Company(\n name='JSM',\n baseurl='https://apartments.jsmliving.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n\n # Write all queries to the database\n session.commit()",
"def save_idea(self): # Will be used to populate the database I guess..\n Database.insert(\"ideas\", self.json())",
"def save(self):\n self.lock()\n\n trader = self.strategy.trader()\n\n for trade in self.trades:\n t_data = trade.dumps()\n ops_data = [operation.dumps() for operation in trade.operations]\n\n # store per trade\n Database.inst().store_user_trade((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, trade.id, trade.trade_type, t_data, ops_data))\n\n # dumps of regions\n trader_data = {}\n regions_data = [region.dumps() for region in self.regions]\n\n Database.inst().store_user_trader((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, self.activity, trader_data, regions_data))\n\n self.unlock()",
"def athletes_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=ATHLETE_TYPE_URI,\n rdf_type_name=ATHLETE_TYPE_NAME, \n kls=Athlete)",
"def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response",
"def save_suggestion(suggestion: Suggestion):\n client = datastore.Client()\n\n kind = \"Suggestion\"\n k = client.key(kind)\n entity = datastore.Entity(key=k)\n suggestion.populateEntity(entity)\n\n logger.debug(f\"Saved location {suggestion}\")\n\n client.put(entity)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates dict with athlete details, including token expiry. | def set_athlete(response):
name = response['athlete']['firstname'] + " " + response['athlete']['lastname']
athlete = {
'id': response['athlete']['id'],
'name': name,
'access_token': response['access_token'],
'refresh_token': response['refresh_token'],
'expires_at': response['expires_at'],
'ts_activity': 0}
return athlete | [
"def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response",
"def alpaca_create(self, keyname = \"ALPACA_API_KEY\", secret = \"ALPACA_SECRET_KEY\"):\n aak = os.getenv(keyname)\n ask = os.getenv(secret)\n if type(aak) is not str | type(aak) is not str:\n raise Exception(\"Could not load API or Secret Key\")\n #try to create object regardless \n alpaca = tradeapi.REST(\n aak,\n ask,\n api_version=\"v2\"\n )\n self.alpaca_api = alpaca\n return alpaca",
"def getDBAthletesUsingAPI():\n athletes_response = api_requester.getAthletes()\n\n # Parse response into Athlete db objects\n athletes_to_return = list()\n for athlete in athletes_response.json():\n athletes_to_return.append(\n getAthleteObjectFromJSON(athlete))\n\n return athletes_to_return",
"def gen_ad(self) -> dict[str, Union[int, float]]:\n user_id: int = np.random.choice(self.users)\n pack_id: int = np.random.choice(self.pack_ids)\n\n ad = {\n \"userID\": int(user_id),\n \"gemPackID\": int(pack_id),\n \"time\": float(time.time())\n }\n return ad",
"def create_initial_attributes(self):\n attributes = {}\n # attributes[\"fertility\"] = self.initialise_fertility()\n attributes[\"age\"] = self.draw_age_distribution()\n attributes[\"experience\"] = datetime.timedelta(\n self.draw_experience(attributes[\"age\"]))\n attributes[\"skill\"] = self.draw_skill()\n attributes[\"DOB\"] = self.timestepper.start_date - attributes[\"age\"]\n attributes[\"aspiration\"] = self.get_intial_aspiration()\n attributes[\"ident\"] = next(self.id_state)\n return attributes",
"def create_amenity():\n body = request.get_json() # transfrom the HTTP body request to dict\n if not body: # if HTTP body req is not a valid JSON\n abort(400, {\"Not a JSON\"}) # raise err and message\n if 'name' not in body: # if dict doesn't contain the key name\n abort(400, {\"Missing name\"})\n objects = Amenity(name=body['name'])\n storage.new(objects)\n storage.save()\n return jsonify(objects.to_dict()), 201 # returns new Amenity",
"def fetch_profile(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch athlete profile\n r = requests.get(API_URL + \"/athlete\", headers=headers)\n profile = r.json()\n if \"errors\" in profile:\n raise AuthError(profile[\"message\"])\n\n return {\n \"firstName\": profile[\"firstname\"],\n \"lastName\": profile[\"lastname\"],\n \"imgUrl\": profile[\"profile\"],\n \"profileUrl\": \"https://www.strava.com/athletes/{}\".format(profile[\"id\"]),\n }",
"def as_dict(self):\n tea_dict = dict()\n tea_dict[\"type\"] = \"tea\"\n tea_dict[\"variety\"] = self.tea_type\n tea_dict[\"Additions\"] = dict()\n for counter, addition in enumerate(self.additions, 1):\n addition_key = \"addition{}\".format(counter)\n tea_dict[\"Additions\"][addition_key] = addition\n \n return tea_dict",
"def get_access(access_token='',expire_time=0):\r\n #Get a new access token if it expires or is five minutes away from exp#iration\r\n if (expire_time==0) or (len(access_token)==0) or (time.time()-expire_time>=-300):\r\n\r\n #API needed to authorize account with refresh token\r\n auth_url = 'https://api.tdameritrade.com/v1/oauth2/token'\r\n\r\n #Data needed for token\r\n data = {'grant_type':'refresh_token',\r\n 'refresh_token':TDAuth_Info.refresh_token,\r\n 'client_id':TDAuth_Info.client_id}\r\n\r\n #Post the data to get the token\r\n auth_reply_json = requests.post(url=auth_url,data=data)\r\n auth_reply=auth_reply_json.json()\r\n\r\n #Now use the token to get account information\r\n access_token = auth_reply['access_token']\r\n expire_time=time.time()+auth_reply['expires_in']\r\n \r\n return (access_token,expire_time)",
"def create_audience(d, bus, creation_time, modified_time=\"-\", id=None):\n print('Creating Audience Definition')\n d['bus'] = bus\n create = d['create_version']\n d['creation_type'] = 'static'\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n table = dynamodb.Table('assets')\n found, status, status_text, actual_audience_size = get_current_status(d)\n status, status_text = modify_status(found, status, status_text, d)\n d['actual_audience_size'] = actual_audience_size\n\n if not found:\n d['id'] = str(uuid.uuid4().hex)[:20]\n\n d['creation_time'] = creation_time\n d['modified_time'] = modified_time\n d['status_text'] = status_text\n d['status'] = status\n d['asset'] = bus + '#audience#info'\n\n if not d['control']:\n print('Zeroing out Control as Control is False')\n d['control_size'] = 'None'\n d['actual_control_size'] = 'None'\n elif create:\n # If running to S3, wait for audience creation to update size\n d['actual_audience_size'] = 'Running...'\n # If running to S3, wait for audience creation to update size\n d['actual_control_size'] = 'Running...'\n else:\n d['actual_control_size'] = d['control_size']\n\n response = table.put_item(\n Item=json.loads(json.dumps(d), parse_float=decimal.Decimal)\n )\n\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n print('Succesful Response')\n # json.dumps(response, indent=4, cls=DecimalEncoder)\n result = 'Success'\n else:\n print(response)\n print('Audience Definition Creation Failed')\n\n raise BaseException('Audience Definition Creation Failed')\n\n print('Saving audience for', d['id'])\n\n if create:\n print('Create is True, Running audience creation via ECS')\n create_ecs(d['id'], bus)\n\n return result, d['id']",
"def create_access_token(self):\n\t\t# Wraper for also caching invalid results\n #def getMetadataRofs(path):\n #\ttry:\n # \treturn self.client.metadata(path)\n # except Exception, e:\n # log.write('Exception at getMetadataRofs for path '+ path + '\\n')\n # pprint(e, log)\n # return False\n\n\t\ttry:\n\t\t\trequest_token = self.session.obtain_request_token()\n\t\t\turl = self.session.build_authorize_url(request_token)\n\t\t\tprint url\n\t\t\traw_input()\n\t\t\taccess_token = self.session.obtain_access_token(request_token)\n\t\t\tself.client = client.DropboxClient(self.session)\n\t\t\t\n\t\t\t# Build cache for metadata querying\n\n\t\t\t# Wraper for also caching invalid results\n\t\t\tdef getMetadataRofs(path):\n\t\t\t\ttry:\n\t\t\t\t\treturn self.client.metadata(path)\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error('Exception at getMetadataRofs for path '+ path + '\\n')\n\t\t logger.debug(sys.exc_info()[0])\n\t\t\t\t\treturn False\n\n\t\t\tself.cache_metadata = Cache(getMetadataRofs)\n\t\t\tself.cache_files = {}\n\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception %s at create_access_token' % (sys.exc_info()[0]))\n\t\t\tlogger.debug(pformat(sys.exc_info()))",
"def create_header():\n return {\"Authorization\": \"Bearer \" + TOKEN}",
"def allocation() -> Union[Tuple[str, int], Dict[str, Any]]:\n manager = get_manager()\n psk = header_token(request)\n if manager.psk_is_valid(psk):\n return {\"alloc_token\": manager.create_token().value}\n else:\n return \"Not authorized\", 403",
"def create_temporary_access_token(self, api_token: str) -> dict:\n query = \"\"\"\n mutation CreateToken {\n createMyProfileTemporaryReadAccessToken(input: {}) {\n temporaryReadAccessToken {\n token\n expiresAt\n }\n }\n }\n \"\"\"\n\n path = jmespath.compile(\n \"\"\"\n data.createMyProfileTemporaryReadAccessToken.temporaryReadAccessToken.{\n token: token\n expires_at: expiresAt\n }\n \"\"\"\n )\n data = self.do_query(query, api_token=api_token)\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"token\", \"expires_at\"])\n parsed_data[\"expires_at\"] = parse_datetime(parsed_data[\"expires_at\"])\n return parsed_data",
"def get_initial_author_dict():\n adict = {}\n try:\n ah = run_sql(\"select aterm,hitlist from rnkAUTHORDATA\")\n for (a, h) in ah:\n adict[a] = deserialize_via_marshal(h)\n return adict\n except:\n register_exception(prefix=\"could not read rnkAUTHORDATA\", alert_admin=True)\n return {}",
"def _get_ENA_ontology():\n ontology = Ontology(convert_to_id('ENA', 'ontology'))\n ena_terms = sorted(ontology.terms)\n # make \"Other\" last on the list\n ena_terms.remove('Other')\n ena_terms.append('Other')\n\n return {'ENA': ena_terms, 'User': sorted(ontology.user_defined_terms)}",
"def create(tarea: dict) -> dict:\n tarea[\"id\"] = generar_consecutivo()\n __tareas.append(tarea)\n return tarea",
"def make_author(self,resp):\n author = {}\n author['name'] = resp['authority_name']\n author['cts_id'] = resp['canonical_id']\n author['works'] = []\n a_id = self.mongo.db.annotation.insert(author)\n new_auth = self.mongo.db.annotation.find_one({'_id' : a_id})\n return new_auth",
"def generate_expired_auth_token(self):\n token = self.generate_auth_token()\n token.created = timezone.now()-timezone.timedelta(hours=25)\n token.save()\n return token"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |