query stringlengths 9–9.05k | document stringlengths 10–222k | negatives listlengths 19–20 | metadata dict |
---|---|---|---|
Delete image belonging to an answer with answer_id.
|
def delete_a_image(answer_id):
    current_image = get_answer_image(answer_id)
    if current_image:
        remove_answer_image(answer_id)
        try:
            os.remove("static/uploads/" + current_image)
        except FileNotFoundError:
            pass
|
[
"def remove_answer_image(answer_id):\n SQL = \"\"\"UPDATE answer SET image = NULL WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))",
"def remove_answer_and_get_q_id(answer_id):\n image_to_delete, question_id = get_answer_image_and_q_id(answer_id)\n if image_to_delete:\n try:\n os.remove(\"static/uploads/\" + image_to_delete)\n except (FileNotFoundError, TypeError):\n pass\n\n delete_answer_by_id(answer_id)\n return question_id",
"def img_delete_by_id(self, img_id: int) -> None:\n img = self.img_by_id(img_id)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')",
"def delete_selected_answer(self, instance, answer):\n\n del instance\n if answer == 'yes':\n app = App.get_running_app()\n self.viewer.stop()\n fullpath = self.fullpath\n filename = self.photo\n if self.type == 'Tag':\n app.Tag.remove(fullpath, self.target, message=True)\n deleted = True\n else:\n photo_info = app.Photo.exist(fullpath)\n deleted = app.Photo.delete_file(fullpath, filename, message=True)\n if deleted:\n if photo_info:\n app.update_photoinfo(folders=photo_info[1])\n if deleted:\n app.photos.commit()\n if len(self.photos) == 1:\n app.show_database()\n else:\n self.next_photo()\n Cache.remove('kv.loader')\n self.cache_nearby_images()\n #Cache.remove('kv.image')\n #Cache.remove('kv.texture')\n self.update_tags()\n self.update_treeview()\n self.dismiss_popup()",
"def delete_image(self, http_request, image_id):\n image = self.image_by_id(image_id)\n if image:\n self.glance_admin_image_store.remove(image)\n http_request.setResponseCode(204)\n return b''\n http_request.setResponseCode(404)\n return b''",
"def delete_answer_by_id(answer_id):\n SQL = \"\"\"DELETE FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))",
"def delete(self, answer_id):\n le_answer = get_an_answer(answer_id)\n if not le_answer:\n return {'success': False, 'msg': 'answer does not exist'}\n else:\n return delete_a_answer(answer_id)",
"def img_delete_by_path(self, img_path: str) -> None:\n img = self.img_by_path(img_path)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')",
"def delete_answer_by_id(conn, answer_id):\n SQL = \"\"\"DELETE FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n with conn.cursor() as cursor:\n cursor.execute(SQL, data)",
"def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)",
"def delete_image(self, image_id, timeout=None):\n\n url = '{}/image/{}'.format(self.endpoint, image_id)\n response = self.session.delete(url, timeout=timeout)\n return response.json()",
"def del_image(request):\n if not request.user.is_authenticated():\n return HttpResponse(-1)\n img_name = request.POST.get(\"img_name\", \"\")\n if img_name == \"\":\n return HttpResponse(-2)\n file = settings.MEDIA_ROOT + \"/upload/\" + img_name\n if os.path.exists(file):\n os.remove(file)\n return HttpResponse(0)\n return HttpResponse(-3)",
"def _delete_image(self, context, image_id, image_service) -> None:\n try:\n image_meta = image_service.show(context, image_id)\n image_status = image_meta.get('status')\n if image_status == 'queued' or image_status == 'saving':\n LOG.warning(\"Deleting image in unexpected status: \"\n \"%(image_status)s.\",\n {'image_status': image_status},\n resource={'type': 'image', 'id': image_id})\n image_service.delete(context, image_id)\n except Exception:\n LOG.warning(\"Image delete encountered an error.\",\n exc_info=True, resource={'type': 'image',\n 'id': image_id})",
"def delete_photo(request, photo_id):\n if not request.user.is_superuser:\n messages.error(\n request, 'Sorry, you need to be a Picturesque admin to perform this operation.'\n )\n return redirect(reverse('home'))\n\n photo = get_object_or_404(Photo, pk=photo_id)\n photo.delete()\n messages.success(request, 'Photo deleted!')\n return redirect(reverse('photos'))",
"async def delete_answer(self, ctx: Context, question: str, *, answer: str):\n question = await ctx.db.fetchval(\"SELECT question_id from question where content = $1\", question)\n\n if not question:\n return await ctx.send(f\":no_entry: | a question with id `{question}` does not exist.\")\n\n async with ctx.db.acquire():\n check = await ctx.db.execute(\"\"\"DELETE FROM answer where question_id = $1 and LOWER(content) = $2 \n RETURNING answer\"\"\", question, answer.lower())\n\n if check == \"DELETE 0\":\n return await ctx.send(f\"The answer `{answer}` does not exist.\")\n\n await ctx.send(\"> successfully updated.\")",
"def delete_avatar_image(avatar_id: UUID) -> Result[None, str]:\n avatar = db.session.get(DbTourneyAvatar, avatar_id)\n\n if avatar is None:\n return Err('Unknown avatar ID')\n\n # Delete file.\n upload.delete(avatar.path)\n\n # Delete database record.\n db.session.delete(avatar)\n db.session.commit()\n\n return Ok(None)",
"def db_delete_one_image(imgId):\n\tprint \"delete one image from database: \"+ str(imgId)\n\timage\t\t\t= Picture.objects.get(pk=imgId)\n\timage.visible \t= False\n\timage.save()",
"def delete(self, request, vnf_id):\n return self.deleteImage(request, vnf_id)",
"def delete_image(self, index):\n if isinstance(index, int) == False or index > self.maximum_image_count:\n raise Exception(\n \"Index for deletion should be smaller integer than maximum_image_count\")\n # Delete the image from the image list by\n # poping the entry out of the dictionary!\n self.image_list.pop(index, None)"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
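The `delete_a_image` row above calls a `get_answer_image` helper that is not included in this dump. A minimal sketch of it, assuming it follows the same `db.run_statements` pattern as the other answer queries in these rows (compare `get_answer_image_and_q_id` among the negatives below), might be:

def get_answer_image(answer_id):
    # Hypothetical helper, not part of this dump: fetch the image filename for one answer.
    SQL = """SELECT image FROM answer WHERE id = %s;"""
    data = (answer_id,)
    fetch = "one"
    return db.run_statements(((SQL, data, fetch),))[0][0]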
Remove answer image by updating database, setting image to NULL.
|
def remove_answer_image(answer_id):
    SQL = """UPDATE answer SET image = NULL WHERE id = %s;"""
    data = (answer_id,)
    fetch = None
    db.run_statements(((SQL, data, fetch),))
|
[
"def delete_a_image(answer_id):\n current_image = get_answer_image(answer_id)\n if current_image:\n remove_answer_image(answer_id)\n try:\n os.remove(\"static/uploads/\" + current_image)\n except FileNotFoundError:\n pass",
"def db_delete_one_image(imgId):\n\tprint \"delete one image from database: \"+ str(imgId)\n\timage\t\t\t= Picture.objects.get(pk=imgId)\n\timage.visible \t= False\n\timage.save()",
"def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)",
"def clearImages(self) -> None:\n ...",
"def _remove_existing(img):\n if os.path.exists(img):\n os.unlink(img)\n return img",
"def delete_selected_answer(self, instance, answer):\n\n del instance\n if answer == 'yes':\n app = App.get_running_app()\n self.viewer.stop()\n fullpath = self.fullpath\n filename = self.photo\n if self.type == 'Tag':\n app.Tag.remove(fullpath, self.target, message=True)\n deleted = True\n else:\n photo_info = app.Photo.exist(fullpath)\n deleted = app.Photo.delete_file(fullpath, filename, message=True)\n if deleted:\n if photo_info:\n app.update_photoinfo(folders=photo_info[1])\n if deleted:\n app.photos.commit()\n if len(self.photos) == 1:\n app.show_database()\n else:\n self.next_photo()\n Cache.remove('kv.loader')\n self.cache_nearby_images()\n #Cache.remove('kv.image')\n #Cache.remove('kv.texture')\n self.update_tags()\n self.update_treeview()\n self.dismiss_popup()",
"def remove_label(self, image_id):\n\n Image.objects.filter(pk=image_id).update(label=None)",
"def remove_image(self: E) -> E:\n try:\n del self._image\n except AttributeError:\n pass\n\n return self",
"def remove_answer_and_get_q_id(answer_id):\n image_to_delete, question_id = get_answer_image_and_q_id(answer_id)\n if image_to_delete:\n try:\n os.remove(\"static/uploads/\" + image_to_delete)\n except (FileNotFoundError, TypeError):\n pass\n\n delete_answer_by_id(answer_id)\n return question_id",
"def delete(self, *args, **kwargs):\n self.image.delete()\n super(StoredImage, self).delete(*args, **kwargs)",
"def delete_metadata(full_path_to_img):\n piexif.remove(full_path_to_img, \"clean_image.jpg\")\n move(\"clean_image.jpg\", \"documents/clean_image.jpg\")",
"def cleanup_thumbnail(sender, instance, **kargs):\n if instance.file.name.endswith('.png'):\n delete(instance.file)",
"def img_delete_by_path(self, img_path: str) -> None:\n img = self.img_by_path(img_path)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')",
"def delete_image(self, node_image):\r\n\r\n raise NotImplementedError(\r\n 'delete_image not implemented for this driver')",
"def rename_answer_image(filename, answer_id):\n SQL = \"\"\"UPDATE answer SET image = %s WHERE id = %s;\"\"\"\n data = (filename, answer_id)\n fetch = None\n db.run_statements(((SQL, data, fetch),))",
"def delete_image_tag(self, img, tag):\r\n return img.delete_tag(tag)",
"def delete_metadata_from_png(full_path_to_img):\n image = Image.open(full_path_to_img)\n image.save(\"documents/clean_image.png\")",
"def del_image(request):\n if not request.user.is_authenticated():\n return HttpResponse(-1)\n img_name = request.POST.get(\"img_name\", \"\")\n if img_name == \"\":\n return HttpResponse(-2)\n file = settings.MEDIA_ROOT + \"/upload/\" + img_name\n if os.path.exists(file):\n os.remove(file)\n return HttpResponse(0)\n return HttpResponse(-3)",
"def remove_image(self, image):\n try:\n self.client.remove_image(image = image['Id'], force = True)\n Logger.log(\"Removing image: %s\" % image['Id'])\n except:\n Logger.log_container_error(\"image\", image)"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
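Every database helper in these rows delegates to a `db.run_statements` utility whose implementation is not part of this dump. As a rough sketch only, assuming a psycopg2 connection and that `fetch` selects between no result, one row ("one"), or one column ("col"), it could look like the following; the connection settings and database name are hypothetical:

import psycopg2


def run_statements(statements):
    # Hypothetical sketch: execute (SQL, data, fetch) triples in one transaction.
    # fetch is None (no result), "one" (single row) or "col" (first column of all rows).
    results = []
    connection = psycopg2.connect(dbname="askmate")  # hypothetical connection settings
    try:
        with connection:  # commit on success, roll back on error
            with connection.cursor() as cursor:
                for sql, data, fetch in statements:
                    cursor.execute(sql, data)
                    if fetch == "one":
                        results.append(cursor.fetchone())
                    elif fetch == "col":
                        results.append([row[0] for row in cursor.fetchall()])
    finally:
        connection.close()
    return results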
Return question_id based on answer_id.
|
def get_question_id(answer_id):
    SQL = """SELECT question_id FROM answer WHERE id = %s;"""
    data = (answer_id,)
    fetch = "one"
    question_id = db.run_statements(((SQL, data, fetch),))[0][0]
    return question_id
|
[
"def get_answer_to_question(question_id):\n return Question.query.filter_by(id=question_id).first_or_404().answer",
"def get_answer_by_id(answer_id):\n\n return Answer.query.get(answer_id)",
"def _get_answer(self, answer_id):\n return self._translate_sent(self.answer_pool[answer_id])",
"def get_answer_details(answer_id):\n SQL = \"\"\"SELECT id, submission_time, vote_number, question_id, message, image\n FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = \"one\"\n\n answer = db.run_statements(((SQL, data, fetch),))[0]\n return answer",
"def get_answer_image_and_q_id(answer_id):\n SQL = \"\"\"SELECT image, question_id FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = \"one\"\n\n a_img_and_id = db.run_statements(((SQL, data, fetch),))[0]\n return a_img_and_id",
"def remove_answer_and_get_q_id(answer_id):\n image_to_delete, question_id = get_answer_image_and_q_id(answer_id)\n if image_to_delete:\n try:\n os.remove(\"static/uploads/\" + image_to_delete)\n except (FileNotFoundError, TypeError):\n pass\n\n delete_answer_by_id(answer_id)\n return question_id",
"def getQuestionId(self):\n return self._qid",
"def get_other_answer_ids(answer_id, question_id):\n SQL = \"\"\"SELECT id FROM answer WHERE question_id = %s AND id != %s;\"\"\"\n data = (question_id, answer_id)\n fetch = \"col\"\n other_answer_ids = db.run_statements(((SQL, data, fetch),))[0]\n return other_answer_ids",
"def get_question_id(self):\n return # osid.id.Id",
"def get(self, answer_id):\n le_answer = get_an_answer(answer_id)\n if not le_answer:\n return {'success': False, 'message': 'answer not found'}\n else:\n return le_answer",
"def valid_answer_id(answer_id):\n SQL = \"\"\"SELECT id FROM answer WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = \"one\"\n try:\n found_id = db.run_statements(((SQL, data, fetch),))[0][0]\n except (DatabaseError, TypeError):\n return False\n return True",
"def get_question_by_id(question_id):\n return Question.query.get(question_id)",
"def get(self, question_id):\n return specific_question(question_id)",
"def get_question_by_id(question_id):\n\n return Question.query.filter_by(question_id=question_id).first()",
"def view_answers(self, id):\n answers = self.db\n quiz_and_answers = answers[id-1]\n return quiz_and_answers",
"def get_answer_for_question(question_number):\n if question_number == 1:\n results = run_query(query_for_question1)\n return prepare_answer1(results)\n\n if question_number == 2:\n results = run_query(query_for_question2)\n return prepare_answer2(results)\n\n if question_number == 3:\n results = run_query(query_for_question3)\n return prepare_answer3(results)",
"def _is_answer_correct(self, answer_id=int, question_id=int):\n try:\n for question in self.questions_json_list:\n print question\n print \"Q_ID: \" + str(question['id'])\n print \"PASSED Q_ID: \" + str(question_id)\n if question['id'] == question_id:\n print True\n for alternative in question['alternatives']:\n if alternative['id'] == answer_id:\n return alternative['isCorrect']\n\n # for alternative in self.questions_json_list[question_id]['alternatives']:\n # print self.questions_json_list[question_id]['id']\n # print alternative\n # if alternative['id'] == answer_id:\n # return alternative['isCorrect']\n except Exception as ex:\n print ex",
"def get_student_answer_variable_name(self, student_answers, aid):\n if aid in student_answers:\n for key, val in six.iteritems(self.context):\n # convert val into unicode because student answer always be a unicode string\n # even it is a list, dict etc.\n if six.text_type(val) == student_answers[aid]:\n return '$' + key\n return None",
"def get_test_question_answer(self):\n query_string = \"\"\"\n {\n \"query\": {\n \"term\" : {\"test_completed\": false}\n }\n }\n \"\"\"\n answer_doc = None\n test_answer_es = Elasticsearch([self.application.es_test_host])\n search_results = test_answer_es.search(self.application.es_test_index,\n self.application.es_test_type,\n body=query_string, size=10)\n if search_results['hits']['total'] > 0:\n answer_doc = random.choice(search_results['hits']['hits'])\n\n if not answer_doc:\n return self.generate_done_message()\n\n answer = answer_doc['_source']['answer']\n test_answer_id = answer_doc['_id']\n c_id = answer_doc['_source']['c_id']\n\n query_string = \"\"\"\n {\n \"query\": {\n \"term\" : {\"c_id\": %s}\n }\n }\n \"\"\" % c_id\n test_question_es = Elasticsearch([self.application.es_test_question_host])\n search_results = test_question_es.search(\n self.application.es_test_question_index,\n self.application.es_test_question_type, body=query_string, size=1)\n question = search_results['hits']['hits'][0]['_source']['question']\n\n return (question, answer, test_answer_id)"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
Mark an answer accepted. Deselect any previously accepted answer, since only one accepted answer may remain.
|
def mark_accepted_exclusively(answer_id, question_id):
    other_answer_ids = get_other_answer_ids(answer_id, question_id)
    if not other_answer_ids:
        other_answer_ids = (None,)

    SQL1 = """UPDATE answer SET accepted = true WHERE id = %s;"""
    data1 = (answer_id,)
    SQL2 = """UPDATE answer SET accepted = false WHERE accepted = true and id IN %s;"""
    data2 = (tuple(other_answer_ids),)
    fetch = None
    db.run_statements(((SQL1, data1, fetch), (SQL2, data2, fetch)))
|
[
"def remove_accept_mark(answer_id):\n SQL = \"\"\"UPDATE answer SET accepted = false WHERE accepted = true and id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))",
"def accept_answer(self, post, user=None):\n assert post is None or post.topic == self, \\\n 'that post does not belong to the topic'\n if self.answer is not None:\n self.answer.is_answer = False\n atomic_add(self.answer.author, 'reputation',\n -settings.REPUTATION_MAP['LOSE_ON_LOST_ANSWER'])\n if user is None:\n user = post and post.author or self.author\n if post is not None:\n post.is_answer = True\n atomic_add(post.author, 'reputation',\n settings.REPUTATION_MAP['GAIN_ON_ACCEPTED_ANSWER'])\n self.answer_author = post.author\n self.answer_date = post.created\n self.answer = post\n try_award('accept', user, self, post)",
"def force_accept(self):\n self.set_state('accepted')\n self.__votes_cast = OOBTree.OOBTree()",
"def mark_accepted(self, count=1):\r\n if self.accepted + count > self.sent:\r\n raise InvitationError('There can\\'t be more accepted ' \\\r\n 'invitations than sent invitations.')\r\n self.accepted = models.F('accepted') + count\r\n self.save()",
"def force_accept(self):\n self.set_state('accepted')",
"def is_accepted(self, is_accepted):\n\n self._is_accepted = is_accepted",
"def mark_accepted(self, new_user):\r\n self.user.invitation_stats.mark_accepted()\r\n signals.invitation_accepted.send(sender=self,\r\n inviting_user=self.user,\r\n new_user=new_user)\r\n self.delete()",
"def terms_accepted(self, terms_accepted):\n\n self._terms_accepted = terms_accepted",
"def onYes(self):\n selection = self.meetupList.curselection()\n if len(selection) == 1:\n meetup_data = json.loads(self.meetupList.get(selection[0]).lstrip().rstrip())\n peerid = next(iter(meetup_data))\n if peerid != self.btpeer.myid:\n self.btpeer.meetups[peerid]['accepted'] = True\n self.updateMeetupList()\n self.btpeer.sendtopeer( peerid, MEETREPLY,\n '%s %s' % (self.btpeer.myid, 'Yes'))",
"def answer(self, answer):\n if answer is None:\n raise ValueError(\"Invalid value for `answer`, must not be `None`\")\n\n self._answer = answer",
"def answer_question(self, student_id, question_id, option_id):\n ques = self.get_question(question_id)\n is_student_eligible, student = self.student_eligible(student_id)\n if ques and is_student_eligible:\n if ques.get_right_answer() and option_id == ques.get_right_answer().option_id:\n self.add_student_score(student, 1)",
"def return_accepted(self, return_accepted):\n\n self._return_accepted = return_accepted",
"def confirm(self, question, default=False, true_answer_regex=\"(?i)^y\"):\r\n return self._io.confirm(question, default, true_answer_regex)",
"def add_possible_answer(self, answer):\n self.possible_answers.append(answer)",
"def score_answer(self, answer, answer_spec):\n raise NotImplementedError",
"def process_question(self):\n for rb in self.rbs:\n rb.configure(state = DISABLED)\n if self.var.get()==self.questions[self.index].answer: \n self.correct += 1\n self.feedback.config(text = \"Correct! \" + str(self.correct) + \"/\" + str(self.index + 1))\n else:\n self.feedback.config(text = \"Incorrect! The answer is \"+ self.questions[self.index].answer + \" \" +\n str(self.correct) + \"/\" + str(self.index + 1))",
"def is_terms_accepted(self, is_terms_accepted):\n\n self._is_terms_accepted = is_terms_accepted",
"def handle_accepted(cls, agreement): # pragma: no cover",
"def __AcceptAllMandates(self, user):\n if self._mandates:\n for mandate in self._mandates:\n if mandate.IsAcceptedByTrader(user) is False:\n mandate.AddAcceptedTrader(user)\n mandate.Commit()\n getLogger().debug('Accepted mandate (%s) .' % mandate.Name())\n else:\n getLogger().debug('Mandate has previously been accepted (%s)' % mandate.Name())"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
Return all answer ids for question with question_id, other than answer_id.
|
def get_other_answer_ids(answer_id, question_id):
    SQL = """SELECT id FROM answer WHERE question_id = %s AND id != %s;"""
    data = (question_id, answer_id)
    fetch = "col"
    other_answer_ids = db.run_statements(((SQL, data, fetch),))[0]
    return other_answer_ids
|
[
"def get_answer_ids(self):\n return # osid.id.IdList",
"def get_forum_question_ids():\n return list(map(lambda q: q.id, get_forum_questions()))",
"def answered_questions(self):\n return self.question_set.filter(\n status=question_constants.ANSWERED\n )",
"def get_questions_answered(attempt_id, quiz_id):\n params = {\n 'filter': '{{\"$and\": [{{\"relatedActivities\": {{\"$elemMatch\":{{\"$eq\":\"{u}/mod/quiz/attempt'\n '.php?attempt={a}&cmid={q}\"}}}}}}, {{\"statement.verb.id\":'\n '\"http://adlnet.gov/expapi/verbs/answered\"}}]}}'.format(a=attempt_id, q=quiz_id,\n u=settings.MOODLE_BASE_URL)\n }\n\n resp = connect('api/connection/statement/', 200, 'get', params=params)\n json = resp.json()\n return json",
"def getSessionAnswers(self, sessionId):\n\t\tanswerResults = Service.db.query(\"SELECT * FROM answers WHERE session_id = %s\", sessionId)\n\t\treturn [self._map(answerResult) for answerResult in answerResults]",
"def view_answers(self, id):\n answers = self.db\n quiz_and_answers = answers[id-1]\n return quiz_and_answers",
"def get_answers(self, workspace_id):\n return QuestionAnswer.objects.filter(\n Q(\n workspace_id=workspace_id,\n document__document_type__stage_num=2\n ) |\n Q(\n workspace_id=workspace_id,\n document__document_type__stage_num=6\n )\n )",
"def get_answers(self):\n answers = {}\n for answer in Answer.objects.filter(answer_sheet=self):\n answers[answer.question.id] = answer\n return answers",
"def answerset(answerset_id):\n return render_template('answerset.html', answerset_id=answerset_id, answer_id=[])",
"def get_respondent_ids(self):\n self.respondent_ids = []\n [self.respondent_ids.append(t.respondent_id) for t in self.testimony]\n self.respondent_ids = tuple(set(self.respondent_ids))\n return self.respondent_ids",
"def get_answer_to_question(question_id):\n return Question.query.filter_by(id=question_id).first_or_404().answer",
"def get_answers(self, obj):\n answer_list = Answer.objects.filter(question=obj.pk)\n serializer = AnswerSerializer(instance=answer_list, many=True)\n return serializer.data",
"def selectable_answers_with_pk(self):\n return self._selectable_answers(with_pk=True)",
"def _find_q_ids(html, groups):\n question_ids = []\n for component in tags.get_components_from_html(html):\n if component['cpt_name'] == 'question':\n question_ids.append(int(component['quid']))\n elif component['cpt_name'] == 'question-group':\n qgid = int(component['qgid'])\n if qgid in groups:\n for question_id in groups[qgid]:\n question_ids.append(int(question_id))\n return question_ids",
"def _selectable_answers(self, with_pk=False):\n if self.type == \"OPEN\":\n return None\n elif self.type == \"MCQ\":\n return self.answers.filter(is_deleted=False)\n elif self.type == \"LINKED\":\n if with_pk:\n return [\n (\n (answer.pk, answer.answer),\n (answer.linked_answer.pk, answer.linked_answer.answer),\n )\n for answer in self.answers.filter(is_deleted=False)\n if answer.answer and answer.linked_answer.answer\n ]\n else:\n return [\n (answer.answer, answer.linked_answer.answer)\n for answer in self.answers.filter(is_deleted=False)\n if answer.answer and answer.linked_answer.answer\n ]",
"def get_answers(self):\n if len(self.answer_ids) > 1:\n return self.default_answer_map\n if self.expect:\n return {self.answer_ids[0]: self.expect}\n return self.default_answer_map",
"def all_answered(self) -> Set[str]:\n return reduce(set.intersection, self.answers, initial=self.any_answered)",
"def get_queryset(self):\n qs = self.queryset\n question_id = self.request.query_params.get('question')\n if question_id is not None:\n qs = qs.filter(answer__question_id=question_id)\n return qs",
"def get_answers(question_num):\n question_num = str(question_num)\n question = get_current_question_filename(question_num)\n print(\"getting question: %s\" % question)\n if not question:\n return []\n folder, filename = question.split('/')\n name_of_question = filename.split('.')[0]\n all_answers = get_contents(\"answers\")\n answers = []\n for file_obj in all_answers:\n filepath = file_obj['Key'].split('/')[-1]\n name = filepath.split('.')[0]\n if name_of_question in name:\n print(\"finding answer %s\" % name)\n answers.append(filepath)\n return answers"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
Remove accept mark from answer.
|
def remove_accept_mark(answer_id):
    SQL = """UPDATE answer SET accepted = false WHERE accepted = true and id = %s;"""
    data = (answer_id,)
    fetch = None
    db.run_statements(((SQL, data, fetch),))
|
[
"def clean_answer(self, answer):\n answer = answer.replace('\"', \"\")\n answer = re.sub(r'\\(.*?\\)', '', answer)\n if answer[0:4].lower() == \"the \":\n answer = answer[4:]\n if answer[0:3].lower() == \"an \":\n answer = answer[3:]\n if answer[0:2].lower() == \"a \":\n answer = answer[2:]\n\n return answer.strip()",
"def force_accept(self):\n self.set_state('accepted')\n self.__votes_cast = OOBTree.OOBTree()",
"def can_unaccept_as_answer(self, post):\n if self.is_admin:\n return True\n if post.topic.author == self:\n return True\n return self.reputation >= \\\n settings.REPUTATION_MAP['UNACCEPT_ANSWER']",
"def clear_answer_entry(self):\n self.answer_entry.delete(0, constants.END)\n self.set_focus_on_answer()",
"def onNo(self):\n selection = self.meetupList.curselection()\n if len(selection) == 1:\n meetup_data = json.loads(self.meetupList.get(selection[0]).lstrip().rstrip())\n peerid = next(iter(meetup_data))\n if peerid != self.btpeer.myid:\n self.btpeer.meetups[peerid]['accepted'] = False\n self.updateMeetupList()\n self.btpeer.sendtopeer( peerid, MEETREPLY,\n '%s %s' % (self.btpeer.myid, 'No'))",
"def force_accept(self):\n self.set_state('accepted')",
"def reject_at_end(self, reject_at_end):\n\n self._reject_at_end = reject_at_end",
"def confirm(self, question, default=False, true_answer_regex=\"(?i)^y\"):\r\n return self._io.confirm(question, default, true_answer_regex)",
"def removeAIInterest(self, handle):\n self._sendRemoveAIInterest(handle)",
"def reject(self):\n # print(\"REJECTING REQUEST RN\")\n self.rejected = timezone.now\n # self.save()\n signals.follow_request_rejected.send(sender=self)\n self.delete()",
"def tap_clear():\n temp_clear = \"\"\n answer.set(temp_clear)",
"def clear_accept_charset(self, ):\n self.clear_attribute_value(self.AttributeNames.ACCEPT_CHARSET)\n return self",
"def clear_ans(self) -> None:\n self.display_ans.set(\"\")",
"def remove_accept_charset(self, value):\n self.attrs.remove_value(self.AttributeNames.ACCEPT_CHARSET, klass)\n return self",
"def mark_accepted_exclusively(answer_id, question_id):\n other_answer_ids = get_other_answer_ids(answer_id, question_id)\n if not other_answer_ids:\n other_answer_ids = (None,)\n\n SQL1 = \"\"\"UPDATE answer SET accepted = true WHERE id = %s;\"\"\"\n data1 = (answer_id,)\n SQL2 = \"\"\"UPDATE answer SET accepted = false WHERE accepted = true and id IN %s;\"\"\"\n data2 = (tuple(other_answer_ids),)\n fetch = None\n db.run_statements(((SQL1, data1, fetch), (SQL2, data2, fetch)))",
"def reject(self) -> None:\n\n assert self.state == 'submitted'\n self.state = 'rejected'",
"def correct_answer_response():\n return '\\nCorrect!'",
"def __unlabel_line(self, line):\n\n if line[0:3] == '!*!':\n line = line[3:]\n return line",
"def cb_comment_reject(args):\n request = args['request']\n session = request.getSession()\n form = request.getForm()\n data = request.getData()\n\n try:\n nospam = int(form[\"nospam\"].value)\n sess_nospam = int(session[\"nospam\"])\n except:\n nospam = 0\n sess_nospam = 1\n\n if nospam != sess_nospam:\n _remember_comment(request)\n data[\"cmt_nospam_error\"] = \"Secret number did not match.\"\n return True\n else:\n _forget_comment(request)\n if \"cmt_nospam_error\" in data:\n del data[\"cmt_nospam_error\"]\n return False"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
``NullAgency.before_create`` returns the ``IObject`` passed to it with no modifications.
|
def test_before_create(self, obj):
    state = _KubernetesState()
    actual = NullAgency().before_create(state, obj)
    self.assertThat(actual, Equals(obj))
|
[
"def test_after_create(self, obj):\n state = _KubernetesState()\n actual = NullAgency().after_create(state, obj)\n self.assertThat(actual, Equals(obj))",
"def before_create_object(self, data, view_kwargs):\n raise NotImplementedError",
"def pre_create(cls, **kwargs):\n return kwargs",
"def after_create_object(self, obj, data, view_kwargs):\n raise NotImplementedError",
"async def __entity_created__(self) -> None:",
"def before_create_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):\n raise NotImplementedError",
"def test__Webhook__precreate__0():\n webhook_id = 202302050049\n webhook = Webhook.precreate(webhook_id)\n \n _assert_fields_set(webhook)\n vampytest.assert_eq(webhook.id, webhook_id)",
"def test__Attachment__precreate__0():\n attachment_id = 202211010000\n \n attachment = Attachment.precreate(attachment_id)\n _assert_fields_set(attachment)\n \n vampytest.assert_eq(attachment.id, attachment_id)",
"def on_post_create(self, obj_id, obj):\n # Note MM: Some resources do not have a LC state anymore\n #self.advance_lcs(obj_id, LCS.AVAILABLE)\n\n return",
"def perform_create(self, serializer):\n log_response = log_transaction()\n if log_response.status_code == status.HTTP_201_CREATED:\n serializer.save()\n logging.info(\"Record Created -- POST to idemia /pre-enrollments\")\n else:\n raise TransactionServiceUnavailable()",
"def post_creation(self):\n self.deped_org = True",
"def create_opportunity():",
"def test__Connection__precreate__0():\n connection_id = 202210080001\n connection = Connection.precreate(connection_id)\n _assert_fields_set(connection)\n vampytest.assert_eq(connection.id, connection_id)",
"def after_create_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):\n raise NotImplementedError",
"def handle_create(self):\r\n asclient = self.stack.clients.auto_scale()\r\n group = asclient.create(**self._get_create_args())\r\n self.resource_id_set(str(group.id))",
"async def create_hook() -> bool:\n await self.__entity_created__()\n return True",
"def create(self):\n ret = self._get_attr(\"create\")\n return ret",
"def handle_create(self):\r\n asclient = self.stack.clients.auto_scale()\r\n args = self._get_args(self.properties)\r\n policy = asclient.add_policy(**args)\r\n resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)\r\n self.resource_id_set(resource_id)",
"def _pre_init(self, **kwargs) -> None:\n raise NotImplementedError"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
``NullAgency.after_create`` returns the ``IObject`` passed to it with no modifications.
|
def test_after_create(self, obj):
    state = _KubernetesState()
    actual = NullAgency().after_create(state, obj)
    self.assertThat(actual, Equals(obj))
|
[
"def after_create_object(self, obj, data, view_kwargs):\n raise NotImplementedError",
"def test_before_create(self, obj):\n state = _KubernetesState()\n actual = NullAgency().before_create(state, obj)\n self.assertThat(actual, Equals(obj))",
"def post_creation(self):\n self.deped_org = True",
"def after_create_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):\n raise NotImplementedError",
"def on_post_create(self, obj_id, obj):\n # Note MM: Some resources do not have a LC state anymore\n #self.advance_lcs(obj_id, LCS.AVAILABLE)\n\n return",
"async def __entity_created__(self) -> None:",
"def before_create_object(self, data, view_kwargs):\n raise NotImplementedError",
"def after_save(self, instance=None):\n pass",
"def after_get_object(self, obj, view_kwargs):\n raise NotImplementedError",
"def create_opportunity():",
"def create_agent_done(self):\n assert not self.flag_create_agent_done\n\n self.flag_create_agent_done = True\n self._try_load_balance()",
"def post_init(self):\n if self._id is None: # New object\n self._id = self.gen_id()\n self._saved = {}\n else:\n self.update_saved()",
"def create_manager_office(sender, instance, created, **kwargs):\n if created:\n OrganizationalStructureOffice.objects.create(\n name=DEFAULT_ORGANIZATIONAL_STRUCTURE_OFFICE,\n slug = slugify(DEFAULT_ORGANIZATIONAL_STRUCTURE_OFFICE),\n description=DEFAULT_ORGANIZATIONAL_STRUCTURE_OFFICE_DESC,\n organizational_structure=instance,\n is_default=True,\n is_active=True\n )\n # log action\n logger.info('[{}] default office {}'\n ' created in structure {}'.format(\n timezone.localtime(),\n DEFAULT_ORGANIZATIONAL_STRUCTURE_OFFICE,\n instance)\n )",
"def on_create(self) -> typing.Optional[\"AwsSdkCall\"]:\n return self._values.get('on_create')",
"def createEvent(self):\n\n raise NotImplementedError( \"Should have implemented this\" )",
"def post(self, request, *args, **kwargs):\n self.object = None\n self.other_object = None\n return super(OneToOneCreateMixin,self).post(request, *args, **kwargs)",
"async def create_hook() -> bool:\n await self.__entity_created__()\n return True",
"def new_deal_added(self, context, payload):\n \n deal_entity = ActiveCampaignDeal(\n deal_id=payload['deal[id]'],\n title=payload['deal[title]'],\n value=payload['deal[value]'],\n owner_id=payload['deal[owner]'],\n currency=payload['deal[currency]'],\n pipeline_title=payload['deal[pipeline_title]'],\n contact_email_address=payload['deal[contact_email]'],\n account_id=payload['customer_acct_id'],\n contact_id=payload['contact[id]'],\n pipeline_id=payload['deal[pipelineid]'],\n stage=payload['deal[stage_title]'],\n forecasted_close_date=payload['deal[fields][0][value]']\n )\n return deal_entity.__dict__",
"def pre_create(cls, **kwargs):\n return kwargs"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
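Both NullAgency tests above assert the same behaviour: the hook returns the ``IObject`` unchanged. A minimal sketch of an agency with that behaviour, assuming only what the tests and their descriptions state (the real class is not shown here), would be:

class NullAgency(object):
    # Hypothetical minimal version: both hooks return the object unmodified.
    def before_create(self, state, obj):
        return obj

    def after_create(self, state, obj):
        return obj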
Any version other than ``None`` is interpreted as a string representation of an integer and incremented.
|
def test_incremented(self):
    version = _incrementResourceVersion(_incrementResourceVersion(None))
    updated = _incrementResourceVersion(version)
    self.expectThat(updated, IsInstance(unicode))
    self.expectThat(updated, AfterPreprocessing(int, Equals(int(version) + 1)))
|
[
"def inc_version(self, ref: str) -> int:\n if ref in self.varcounts:\n self.varcounts[ref] += 1\n return self.varcounts[ref]\n else:\n self.varcounts[ref] = 1\n return 1",
"def increment_null(self):\n self.null_count += 1",
"def autoincrement_version():\n latest_tag = run_command(['git', 'describe', '--abbrev=0'])\n if latest_tag == \"\":\n msg_info(\"There are no tags yet in this repository.\")\n version = \"1\"\n elif \".\" in latest_tag:\n version = latest_tag.replace(\"v\", \"\").split(\".\")[0] + \".\" + str(int(latest_tag[-1]) + 1)\n else:\n version = int(latest_tag.replace(\"v\", \"\")) + 1\n return version",
"def incr_version(self):\n\n self.__entity[\"version\"] += 1\n return self.__entity[\"version\"]",
"def test_version_incrementer_bad(self):\n assert bu.increment(\"10.3.2.9999\", 3) == \"10.3.2.3\"",
"def inc_packageversion(self):\n match = re.compile(r\"^(\\d*)\\.?((\\d*)|(corr\\d*corr))$\")\n m = match.search(self.packageversion)\n #print(m.group(0))\n #print(m.group(1)[4:-4])\n #print(m.group(2))\n if m:\n if \"corr\" in m.group(2):\n pre_inc = int(m.group(2)[4:-4]) + 1\n elif m.group(2) == \"\":\n pre_inc = 1\n else:\n pre_inc = int(m.group(2)) + 1\n self.packageversion = m.group(1) + \".corr\" + str(pre_inc) + \"corr\"\n else:\n self.packageversion = \"99999error\"",
"def bump(ctx, v):\n try:\n new_value = v.next_bump()\n new_value = v.omit_prefix(new_value)\n click.echo(new_value)\n except ValueError:\n click.echo(\"version was not an integer; could not bump.\")\n ctx.exit(1)",
"def test_version_incrementer_good(self):\n assert bu.increment(\"10.3.2.2000\", 1000) == \"10.3.2.3000\"",
"def adding_one(integer_one):\n return integer_one + 1",
"def adding_one(num):\n return num + 1",
"def semver_incr_patch(ver: str) -> str:\n parts = ver.split(\".\")\n patch = str(int(parts[-1]) + 1)\n\n parts = parts[:-1]\n parts.append(patch)\n\n return \".\".join(parts)",
"def toIntegers(self) -> str:\n\n # If our base doesn't have integer versions, just use a dumb default\n if self.base.isName():\n return \"0.0.0+0\"\n\n # Start with our base string, sans a leading 'v'\n string = f\"{self.base}\"[1:]\n\n # If we have commits, include the commit count as the 'build'\n if (self.info is not None) and (self.info.commits is not None):\n string += f\"+{self.info.commits}\"\n else:\n string += \"+0\"\n\n return string",
"def addIntString(self, value: 'int const') -> \"void\":\n return _coin.SbString_addIntString(self, value)",
"def incrementHard(s):\n lastNum = re.compile(r'(?:(\\d+)[^\\d]*(\\d+)[^\\d]*)+')\n m = lastNum.search(s)\n if m:\n next = str(int(m.group(1))+1)\n start, end = m.span(1)\n s = s[:max(end-len(next), start)] + next + s[end:]\n reset = \"1\"\n resetStart, resetEnd = m.span(2)\n endSpan = len(m.group(2))\n s = s[:max(resetEnd-endSpan, resetStart)] + reset.rjust(endSpan,'0') + s[resetEnd:]\n return s",
"def BuildNumber(self) -> int:",
"def SoGLLightIdElement_increment(*args) -> \"int32_t\":\n return _coin.SoGLLightIdElement_increment(*args)",
"def _plan_auto_increment_prefix_number():\n if cpr_auto_increase.value == 'Yes':\n num = int(cpr_prefix_num.value)\n num += 1\n yield from bps.mv(cpr_prefix_num, str(num))",
"def increment(s):\n m = lastNum.search(s)\n if m:\n next = str(int(m.group(1)) + 1)\n start, end = m.span(1)\n s = s[:max(end - len(next), start)] + next + s[end:]\n return s",
"def bump(self, idx: int = -1) -> Version:\n version = self._version\n if idx == -1 and self.pre:\n ret = type(self)(version).complete()\n ret.pre = (self.pre[0], self.pre[1] + 1)\n else:\n head, value = version[:idx], int(version[idx])\n ret = type(self)((*head, value + 1)).complete()\n ret.pre = None\n return ret"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
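The test above exercises a `_incrementResourceVersion` helper that is not included in this dump. A sketch consistent with the description (any non-``None`` value is parsed as an integer string and incremented, with a textual result) and with the test's use of ``None`` as a starting point might be:

def _incrementResourceVersion(version):
    # Hypothetical sketch: None starts the counter, anything else is an integer string.
    if version is None:
        version = 0
    return u"%d" % (int(version) + 1,)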
Edit the nickname of the bot in all servers.
|
async def edit_nick(self, nick: str) -> None:
    for guild in self.guilds:
        await guild.get_member(self.user.id).edit(nick=nick)
|
[
"async def nickpoo(self, ctx, target: discord.Member):\n await target.edit(nick=\"\\U0001f4a9\")\n await ctx.send(\"Nickname changed successfully\")",
"def set_nick_name(self, val):\n self.nick = val",
"def rename(server, name):\r\n server.update(name)",
"def rename(self, new_nickname):\n \n for channel in self.channels:\n channel.rename_user(self.source.nickname, new_nickname)\n \n if new_nickname not in self.nicklist:\n self.nicklist.append(new_nickname)\n \n self.source.nickname = new_nickname",
"async def change_username(self, ctx, username: str):\n member_id = ctx.message.author.id\n\n if (member_id == ctx.message.server.owner_id\n or int(member_id) == ConfigLoader().load_config_setting_int(\n 'BotSettings',\n 'owner_id'\n )\n ):\n await self.bot.change_nickname(ctx.message.server.me, username)\n return await self.bot.say(\"Changed my username!\")",
"async def rename(self, ctx, new_name=\"\"):\n game = get_game(ctx)\n if not new_name:\n new_name = ctx.author.nick if ctx.author.nick is not None else ctx.author.name\n for mode in game.leaderboards:\n if ctx.author.id in game.leaderboards[mode]:\n game.leaderboards[mode][ctx.author.id].name = new_name\n await ctx.send(f\"You have been renamed to {new_name}\")",
"def set_nickname(self, nickname):\n\n # this is just all IRC spec basics\n if len(nickname) > 9 or nickname[0] == \"-\" or nickname[0] == \"#\":\n print(\"Client tried to connect with invalid username\")\n code = \"432\"\n msg = f\":{nickname}\"\n self.send_code(code, nickname, msg)\n return False\n\n try:\n if int(nickname[0]):\n print(\"Client tried to connect with invalid username\")\n code = \"432\"\n msg = f\":{nickname}\"\n self.send_code(code, nickname, msg)\n return False\n except ValueError:\n pass\n\n for client in self.server_mem.clients:\n if self.server_mem.clients[client] == nickname:\n print(\"Client tried to connect with taken username\")\n code = \"433\"\n msg = f\":{nickname}\"\n self.send_code(code, nickname, msg)\n return False\n\n self.nickname = nickname\n self.nick_set = True\n\n msg = f\"NICK {nickname}\"\n self.send_message_from_server(msg)\n self.server_mem.clients[self.socket] = self.nickname\n return True",
"def set_realname(self, name):\n\n self.realname = name\n\n self.server_mem.clients[self.socket] = self.nickname\n self.send_welcome_messages() # separated this for atomicity.",
"def changeNickname(self, nickname, user_id, thread_id=None, thread_type=ThreadType.USER):\n Sender.s_change_nickname(self.Sender, self, nickname, user_id, thread_id, thread_type)",
"def name_bot(irc):\n nick = sys.platform[:3] + '-' + str(int(time.time())) # platform + timestamp\n real_name = nick[3:]\n\n irc.send(('NICK ' + nick + '\\r\\n').encode('utf-8'))\n irc.send(('USER ' + nick + ' ' + nick +\n ' ' + nick + ' :' + real_name + '\\r\\n').encode('utf-8'))\n\n while True:\n receive = irc.recv(4096).decode('utf-8')\n\n if 'Nickname is already in use' in receive: # try another nickname\n nick = sys.platform[:3] + '-' + str(int(time.time()))\n irc.send(('NICK ' + nick + '\\r\\n').encode('utf-8'))\n\n elif nick in receive or 'motd' in receive.lower():\n # successfully connected\n return nick",
"async def nick(self, ctx, member: MemberConv, nick: str = None):\n\n logger.info(\n \"Setting the nickname of user '%s' (%d) to %r\", member.name, member.id, nick\n )\n\n if member.top_role >= ctx.me.top_role:\n raise ManualCheckFailure(\"I don't have permission to nick this user\")\n\n mod = user_discrim(ctx.author)\n await member.edit(\n nick=nick, reason=f\"{mod} {'un' if nick is None else ''}set nickname\"\n )",
"def set_family_nick_name(self, val):\n self.famnick = val",
"async def _name(self, ctx, new_name: str):\n mother = ctx.message.author\n if common.has_mon(str(mother)):\n common.user_data['players'][str(mother)]['mon']['name'] = new_name\n await self.bot.say(\"Congratulations, {0}, your mon has been named {1}!\".format(mother.mention, new_name))\n else:\n await self.bot.say(\"{0}, you have no mon. You need to hatch an egg first.\".format(mother.mention))",
"async def add(self, ctx: commands.Context, user, *name):\n if len(mentions := ctx.message.mentions) == 0:\n await ctx.send(\"Please supply a user to override their nickname.\")\n\n override_user = ctx.message.mentions[0]\n new_nickname = \" \".join(name)\n\n # Change the user's nickname\n await override_user.edit(nick=new_nickname)\n\n # Send a message about the override\n await ctx.send(f\"{override_user.name}'s nickname is now overridden to {new_nickname}.\")\n\n # Update guild data and write changes\n self.check_guild_data_exists(ctx.guild.id)\n\n self.guild_data[ctx.guild.id][\"overrides\"][override_user.id] = new_nickname\n\n self.write_guild_data_changes()",
"def alterCollidedNick(self, nickname):\n return nickname + '____'",
"def onChangePlayerName(self, inEvent: RealTimeEvent):\n\n player_id = inEvent.parameters['player_id']\n player_location = inEvent.parameters['player_location']\n name = inEvent.parameters['name']\n\n if player_id in self.server.PlayerInterface.onlinePlayerWithId:\n player = self.server.PlayerInterface.onlinePlayerWithId[player_id]\n if name not in player.getAliases():\n player.addAlias(name)\n self.realTimeRound.changePlayerName(player_id, player_location, name)",
"def setlastfm(self, server, message, \n username: \"Your Last.FM username\"):\n nick = server.lower(message.address.nick)\n if not username:\n username = message.address.nick\n self.users[nick] = username\n self.savefile()\n return \"\u000304│ ♫ │ \u0003Associated %s with Last.FM user %s.\" % (message.address.nick, username)",
"def rename_user(session, tg_host, login, new_name):\r\n url = f\"https://{tg_host}/api/v3/users/{login}\"\r\n body = {\"name\": new_name}\r\n return session.put(url, data=body)",
"def test_modify_user_existing_nickname(self):\n print('(' + self.test_modify_user_existing_nickname.__name__ + ')',\n self.test_modify_user_existing_nickname.__doc__)\n request_data = MODIFY_USER_VALID_DATA.copy()\n request_data['nickname'] = 'AxelW'\n nickname = 'Mystery'\n resp = self.client.put(resources.api.url_for(resources.User, nickname=nickname),\n headers={CONTENT_TYPE: resources.JSON},\n data=json.dumps(request_data))\n self._assertErrorMessage(resp, 409, 'Existing nickname')"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
Test Fahrenheit to Kelvin conversion.
|
def testf_to_k(self):
    for deg_C, deg_F, deg_K in self.knownC_F_K:
        result = conversions.convertfarhenheittokelvin(deg_F)
        self.assertEqual(deg_K, result)
|
[
"def testConvertFahrenheitToKelvin(self): \n for val in self.known_values:\n from_val = val[1]\n expected_val = val[2]\n returned_val = c.convertFahrenheitToKelvin(from_val)\n self.assertEqual(returned_val,\n expected_val,\n msg=(\n '{}º Kelvin is not equal to expected value'\n ' of {}º Kelvin.') \\\n .format(returned_val, expected_val)\n )",
"def testConvertKelvinToFahrenheit(self):\n for val in self.known_values:\n from_val = val[2]\n expected_val = val[1]\n returned_val = c.convertKelvinToFahrenheit(from_val)\n self.assertEqual(returned_val,\n expected_val,\n msg=(\n '{}º Fahrenheit is not equal to expected value'\n ' of {}º Fahrenheit.') \\\n .format(returned_val, expected_val)\n )",
"def testCelciusToKelvin(self):\r\n for test in self.testCases:\r\n result = conversions.convertCelsiusToKelvin(test[\"celsius\"])\r\n self.assertEqual(test[\"kelvin\"], result)",
"def testConvertCelsiusToKelvin(self):\n for val in self.known_values:\n from_val = val[0]\n expected_val = val[2]\n returned_val = c.convertCelsiusToKelvin(from_val)\n self.assertEqual(returned_val,\n expected_val,\n msg=(\n '{}º Kelvin is not equal to expected value'\n ' of {}º Kelvin.') \\\n .format(returned_val, expected_val)\n )",
"def testConvertKelvinToFahrenheit(self):\r\n print (\"\\nTesting conversion from Kelvin to Fahrenheit.\\n\")\r\n for kelvin, fahr in self.knownValuesKelvinToFahr:\r\n print (('Testing {} conversion should be equal to {}').format(kelvin, fahr))\r\n result = conversions.convertKelvinToFahrenheit(kelvin)\r\n print (('The result of the conversion: {}\\n').format(result))\r\n self.assertEqual(fahr, result)",
"def testConvertKelvinToCelsius(self):\n for val in self.known_values:\n from_val = val[2]\n expected_val = val[0]\n returned_val = c.convertKelvinToCelsius(from_val)\n self.assertEqual(returned_val,\n expected_val,\n msg=(\n '{}º Celsius is not equal to expected value'\n ' of {}º Celsius.') \\\n .format(returned_val, expected_val)\n )",
"def test_KG2E_KL():\n testing_function('kg2e', distance_measure=\"kl_divergence\")",
"def testConvertCelsiusToKelvin(self):\r\n print(\"\\nTesting conversion from Celsius to Kelvin. \\n\")\r\n for celsius, kelvin in self.knownValuesCelsiusToKelvin:\r\n print (('Testing {} convertion should be equal to {}').format(celsius, kelvin))\r\n result = conversions.convertCelsiusToKelvin(celsius)\r\n print (('The result of the conversion: {}\\n').format(result))\r\n self.assertEqual(kelvin, result)",
"def test_to_celsius():\n pass",
"def katz(serum_na, serum_glucose):\n return serum_na + 0.016*(serum_glucose-100)",
"def testConvertKelvinToCelsius(self):\r\n print (\"\\nTesting conversion from Kelvin to Celsius. \\n\")\r\n for kelvin, celsius in self.knownValuesKelvinToCels:\r\n print (('Testing {} convertion should be equal to {}').format(kelvin, celsius))\r\n result = conversions.convertKelvinToCelsius(kelvin)\r\n print (('The result of the conversion: {}\\n').format(result))\r\n self.assertEqual(celsius, result)",
"def test_u2kperp_val():\n test_z = 7.6363125\n test_u = 10\n test_kperp = cosmo.u2kperp(test_u, test_z)\n test_val = 2 * np.pi * test_u / Planck15.comoving_transverse_distance(test_z)\n assert np.isclose(test_val.value, test_kperp.value)",
"def test_known_tke(uvw_and_known_tke):\n u, v, w, e_true = uvw_and_known_tke\n assert_array_equal(e_true, tke(u, v, w))",
"def test_zernike_detector_response(self):\n self.assertTrue(abs(self.kp_sorted[0].response - 1256.8241) < 0.01, \"Incorrect max response\")",
"def test_kraus_adjoint(self):\n mats = self.unitaries\n chans = [Kraus(mat) for mat in mats]\n self._compare_adjoint_to_operator(chans, mats)",
"def test_readMolekyl(self):\n self.assertEqual(kollaMolekylen(\"H2\"), \"Formeln är syntaktiskt korrekt\")\n self.assertEqual(kollaMolekylen(\"Mn4\"), \"Formeln är syntaktiskt korrekt\")",
"def test_p(self):\n x1 = [10, 11, 12, 13]\n x2 = [5, 6, 8, 9]\n p = welchs_ttest(x1, x2)[\"p\"]\n\n assert abs(p-0.0043) < 0.00001",
"def test_handcrafted_examples(self):\n self.assertTrue(abs(pi(1000000) - 3.14) < 0.01)",
"def uvw_and_known_tke():\n u = np.array([-2, -1, 0, 1, 2])\n v = -u\n w = 2 * u\n # 0.5 * sqrt(2 + 2 + 8)\n e_true = np.sqrt(12) / 2.\n return u, v, w, e_true"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
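The test above calls `conversions.convertfarhenheittokelvin` (spelling as in the test), which is not part of this dump. A minimal sketch using the standard Fahrenheit-to-Kelvin formula would be:

def convertfarhenheittokelvin(deg_F):
    # Hypothetical helper matching the name used in the test above:
    # K = (F - 32) * 5/9 + 273.15
    return (deg_F - 32) * 5.0 / 9.0 + 273.15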
Returns the number of arguments of the given function.
|
def _num_arguments(func: Callable) -> int:
    sig = signature(func)
    return len(sig.parameters)
|
[
"def number_of_arguments(func):\n if isinstance(func, functools.partial):\n total_args = len(inspect.signature(func.func).parameters)\n return total_args - len(func.args) - len(func.keywords)\n return len(inspect.signature(func).parameters)",
"def getArgumentCount(self):\n return len(self.__rawArgs__)",
"def len_arguments(self):\n return self.arguments.len()",
"def check_function_argument_count(func, input_arity, infeed_queue):\n def format_error(complaint, quantity):\n return \"%s %d argument%s\" % (complaint, quantity, \"\"\n if quantity == 1 else \"s\")\n\n number_of_arguments_needed = input_arity\n if infeed_queue is not None:\n number_of_arguments_needed += infeed_queue.number_of_tuple_elements\n arg_spec = tf_inspect.getargspec(func)\n number_of_args = len(arg_spec.args)\n if arg_spec.defaults is None:\n number_of_defaults = 0\n else:\n number_of_defaults = len(arg_spec.defaults)\n min_required_arguments = number_of_args - number_of_defaults\n if number_of_arguments_needed < min_required_arguments:\n # The required number of arguments is not enough to call the function.\n if number_of_defaults == 0 and arg_spec.varargs is None:\n return format_error(\"exactly\", number_of_args)\n else:\n return format_error(\"at least\", min_required_arguments)\n if arg_spec.varargs is None and number_of_arguments_needed > number_of_args:\n # The required number of arguments is too many to call the function.\n if number_of_defaults == 0:\n return format_error(\"exactly\", number_of_args)\n else:\n return format_error(\"at most\", number_of_args)\n # Since there are varargs, func can accept any number of arguments\n # greater than the minimum.\n return None",
"def length(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def nArgs(self):\n n=2 # x and t\n if self.has_input:\n n+=1 #u\n if self.implicit:\n n+=1 # xdot\n return n",
"def calculateNumberOfTaggedFunctions(self):\n return len(self.getFunctionAddresses(self.createFunctionContextFilter()))",
"def nArgsOutput(self):\n return self.nArgsImplicit-1",
"def get_func_len(func):\n\treturn min(max(map(lambda bb: bb.start + get_bb_len(bb) - func.start, func.basic_blocks)), func.view.end - func.start)",
"def number_of_parameters(module: nn.Module):\n return sum(parameter.numel() for parameter in module.parameters())",
"def expr_len(e: Expression) -> int:\n return 1 + sum(map(expr_len, e.children()))",
"def getNumFuncEvals(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return SliceSamplerBase.getNumFuncEvals(self)",
"def findLengthFromParam(*args, **kwargs):\n \n pass",
"def calculateNumberOfFunctions(self):\n number_of_functions = 0\n for seg_ea in self.ida_proxy.Segments():\n for function_ea in self.ida_proxy.Functions(self.ida_proxy.SegStart(seg_ea), self.ida_proxy.SegEnd(seg_ea)):\n number_of_functions += 1\n return number_of_functions",
"def get_num_rewards():\n return len(REWARD_FUNCTIONS)",
"def check_number_of_args(method, params):\n\n min_args, max_args = method.arguments_range\n if (max_args - 2) >= len(params) >= (min_args - 2):\n return True\n\n return False",
"def arity(assoc):\n return len(assoc)",
"def count(seq): # real signature unknown; restored from __doc__\n pass",
"def get_number_of_operands(self) -> int:\n return self._number_of_operands"
] |
{"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}}
|
Decorator or method to register a callable under a given name.
|
def register(self, name: str, func: Optional[Callable] = None) -> Optional[Callable]:
    if name in self.callables.keys():
        logging.warning("Overriding callable of name '%s'.", name)

    def assign(func):
        if not callable(func):
            raise ValueError("Argument func must be callable.")
        if self.enforce_arity >= 0 and _num_arguments(func) != self.enforce_arity:
            raise ValueError(
                f"Function has arity {_num_arguments(func)} not {self.enforce_arity}."
            )
        self.callables[name] = func
        return func

    if func is None:
        return assign
    assign(func)
    return None
|
[
"def register(name, fn):\n return el.Dotted.register(name, fn)",
"def register_binning(name: Optional[str] = None):\n\n def decorator(f: Callable) -> Callable:\n key = name or f.__name__[:-8]\n binning_methods[key] = f\n return f\n\n return decorator",
"def register(name):\n\n def add_to_dict(func):\n ATTACKS[name] = func\n return func\n\n return add_to_dict",
"def register_filter_func(name: str) -> Callable:\n\n def _inner_wrapper(func: Callable) -> Callable:\n _filter_funcs[name] = func\n return func\n\n return _inner_wrapper",
"def register(func):\n print('running register(%s)' % func)\n registry.append(func)\n return func",
"def register(self, name, func):\n with self.lock:\n if name not in self.hooks:\n self.hooks[name] = set([func])\n else:\n self.hooks[name].add(func)\n\n # Module is already loaded, call hook right away\n if name in sys.modules:\n func(sys.modules[name])",
"def register(self, hyperp_name, hyperp_fn):\n assert hyperp_name not in self.name_to_h_fn\n self.name_to_h_fn[hyperp_name] = hyperp_fn",
"def register_interpolator(name: str) -> Callable:\n def decorator(f: Callable):\n # Add the model to the list of valid upscalers.\n VALID_INTERPOLATORS[name] = f\n # Add the model to the overarching valid model list.\n register_model(name)\n # Return the function.\n return f\n # Return the decorator.\n return decorator",
"def register_collator(name):\n\n def register_collator_fn(func):\n if name in COLLATOR_REGISTRY:\n raise ValueError(\"Cannot register duplicate collator ({})\".format(name))\n\n if func.__name__ in COLLATOR_NAMES:\n raise ValueError(\n \"Cannot register task with duplicate collator name ({})\".format(\n func.__name__\n )\n )\n COLLATOR_REGISTRY[name] = func\n COLLATOR_NAMES.add(func.__name__)\n return func\n\n return register_collator_fn",
"def register_decorator(func: Callable) -> Callable:\n module_name = module.__name__ if module else func.__module__\n func_name = func.__name__\n _UNITS.setdefault(module_name, dict())[func_name] = unit\n\n return func",
"def _register(\n self, internal_qualified_name: OpName, symbolic_function: SymbolicFunction\n ) -> None:\n self._registry[internal_qualified_name].append(symbolic_function)",
"def register_upscaler(name: str) -> Callable:\n def decorator(f: Callable):\n # Add the model to the list of valid upscalers.\n VALID_UPSCALERS[name] = f\n # Add the model to the overarching valid model list.\n register_model(name)\n # Return the function.\n return f\n # Return the decorator.\n return decorator",
"def register_task(self, key, wrapped_func):\n if key in self.registry:\n raise ValueError('Implementation is already registered: %r' % (key, ))\n self.registry[key] = wrapped_func",
"def register_colorizer(name: str) -> Callable:\n def decorator(f: Callable):\n # Add the model to the list of valid colorizers.\n VALID_COLORIZERS[name] = f\n # Add the model to the overarching valid model list.\n register_model(name)\n # Return the function.\n return f\n # Return the decorator.\n return decorator",
"def register(registry:list):\n def decorate(func):\n registry.append(func)\n return func\n return decorate",
"def register_operation(self, op_name, target_fn):\n self._operations[op_name] = target_fn",
"def register(dataset_name):\n\n def decorator(decorator_dataset_class, decorator_dataset_name):\n _DATASETS[decorator_dataset_name] = decorator_dataset_class\n return decorator_dataset_class\n\n return lambda dataset_class: decorator(dataset_class, dataset_name)",
"def register(condition_name, fn=None, validator=None):\n global _conditions, _validators\n\n if fn is None:\n # Be a decorator\n def decorator(fn):\n register(condition_name, fn=fn, validator=validator)\n return fn\n\n return decorator\n\n # Don't be a decorator, just register\n if condition_name in _conditions:\n raise DuplicateCondition(\n 'Flag condition \"{name}\" already registered.'.format(\n name=condition_name\n )\n )\n\n # We attach the validator to the callable to allow for both a single source\n # of truth for conditions (_conditions) and to allow for validators to be\n # defined on a callable class along with their condition.\n if validator is not None or not hasattr(fn, \"validate\"):\n fn.validate = validator\n\n _conditions[condition_name] = fn",
"def register_problem(name=None):\n\n def decorator(p_cls, registration_name=None):\n \"\"\"Registers & returns p_cls with registration_name or default name.\"\"\"\n p_name = registration_name or default_name(p_cls)\n if p_name in _PROBLEMS:\n raise LookupError(\"Problem %s already registered.\" % p_name)\n\n _PROBLEMS[p_name] = p_cls\n p_cls.name = p_name\n return p_cls\n\n # Handle if decorator was used without parens\n if callable(name):\n p_cls = name\n return decorator(p_cls, registration_name=default_name(p_cls))\n\n return lambda p_cls: decorator(p_cls, name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find a callable given its name.
|
def find(self, name: str) -> Optional[Callable]:
return self.callables.get(name)
|
[
"def locate_qualified_function(qualified_name: str) -> Callable[[], Iterable[ET]]:\n if \".\" not in qualified_name:\n raise QueryException(\"Could not find a '.' in the function name, e.g. my.reddit.rexport.comments\")\n rdot_index = qualified_name.rindex(\".\")\n return locate_function(qualified_name[:rdot_index], qualified_name[rdot_index + 1:])",
"def locate_function(module_name: str, function_name: str) -> Callable[[], Iterable[ET]]:\n try:\n mod = importlib.import_module(module_name)\n for (fname, f) in inspect.getmembers(mod, inspect.isfunction):\n if fname == function_name:\n return f\n # in case the function is defined dynamically,\n # like with a globals().setdefault(...) or a module-level __getattr__ function\n func = getattr(mod, function_name, None)\n if func is not None and callable(func):\n return func\n except Exception as e:\n raise QueryException(str(e))\n raise QueryException(f\"Could not find function '{function_name}' in '{module_name}'\")",
"def findCall(node, methodName):\n\n if type(methodName) is str:\n methodName = set([methodName])\n \n def matcher(node):\n call = getCallName(node)\n if call and call in methodName:\n return call\n \n return query(node, matcher)",
"def _findFunction(self, functionPath):\n\n # Strip module.funcName type paths to deal with earlier versions\n # of the daemon. module is simply thrown away\n parts = functionPath.split(\".\")\n if len(parts)>1:\n calledName = parts[1]\n else:\n calledName = functionPath\n\n if calledName not in self._functions.keys():\n raise xmlrpc.NoSuchFunction(xmlrpc.XMLRPC.NOT_FOUND, \\\n \"Requested function (%s) does not exist!\" % calledName)\n func = self._functions[calledName]\n\n return func",
"def get_callable_from_string(f_name):\n try:\n mod_name, func_name = get_mod_func(f_name)\n if not mod_name and not func_name:\n raise AttributeError(\n \"%s couldn't be converted to a module or function name\" % f_name)\n\n module = __import__(mod_name)\n\n if not func_name:\n func_name = mod_name # The common case is an eponymous class\n\n return getattr(module, func_name)\n\n except (ImportError, AttributeError) as exc:\n raise RuntimeError(\"Unable to create a callable object for '%s': %s\" %\n (f_name, exc))",
"def get_method(self, name):\n return find(self.methods, lambda m: m.name == name)",
"def find_by_name(self, name):\n name = str(name)\n for locator in self._locators:\n if locator[\"name\"] == name:\n return locator\n return None",
"def find_task_from_name(name: str, tasks):\n for t in tasks:\n if t.name == name:\n return t\n return None",
"def get(self, name: str) ->Callable:\n if name in self:\n data = self[name]\n return data['algorithm']\n err_msg = \"'{}' not found in registry. Available names: {}\"\n available_names = ', '.join(sorted(self.keys())) or 'none'\n raise KeyError(err_msg.format(name, available_names))",
"def resolve_by_name(func, name, *args):\n if uuidutils.is_uuid_like(name):\n return name\n\n results = func(criterion={\"name\": \"%s\" % name}, *args)\n length = len(results)\n\n if length == 1:\n return results[0][\"id\"]\n elif length == 0:\n raise exceptions.NotFound(\"Name %s didn't resolve\" % name)\n else:\n msg = \"Multiple matches found for %s, please use ID instead.\" % name\n raise exceptions.NoUniqueMatch(msg)",
"def function_lookup(pymod_path):\n module_name, func_name = pymod_path.rsplit('.', 1)\n module = importlib.import_module(module_name)\n shell_function = getattr(module, func_name)\n assert callable(shell_function), shell_function\n return shell_function",
"def _get_function_from_str(path: str) -> Callable:\n module_name, _, function_name = path.rpartition(\".\")\n module = importlib.import_module(module_name)\n function = getattr(module, function_name)\n return function",
"def find_matching_method(self, url: str, request) -> Callable:\n\n if url in self.parameter_method_cache:\n return self.parameter_method_cache[url][0]\n\n has_method_call = url in self.full_route_method_map\n conditional_method = self.find_conditional_handler_match(request)\n path_method = self.find_parameter_match(request)\n\n if path_method is not None:\n return path_method\n elif conditional_method is not None:\n return conditional_method\n elif has_method_call:\n return self.full_route_method_map[url]\n else:\n return None",
"def find_node(self, name):\n for node in self._nodes:\n if node.name == name:\n return node\n self._lexer.raise_KeyError()",
"def get_op_from_module_name(self, name):\n return next((op for op in self._ops.values() if op.dotted_name == name), None)",
"def resolve_method(self, name):\n\n\t\tif \".\" in name:\n\t\t\tifname, name = name.rsplit(\".\", 1)\n\t\telse:\n\t\t\tifname = None\n\n\t\tfor iface in self.interfaces:\n\t\t\tif iface.name == ifname or ifname is None:\n\t\t\t\tfor method in iface.methods:\n\t\t\t\t\tif method.name == name:\n\t\t\t\t\t\treturn iface, method\n\t\telse:\n\t\t\treturn None, None",
"def cmd(name: str) -> Callable:\n return g.new_cmd_decorator(name, ['c', 'findCommands',])",
"def find_symbol(target, name, module=MACINTALK_MODULE):\n for mod in target.module_iter():\n if module and module != mod.GetFileSpec().GetFilename():\n continue\n for sym in mod:\n if sym.GetName() == name:\n return sym\n raise RuntimeError('symbol not found: ' + name)",
"def findFunction(node):\n \n return query(node, lambda node: node.type == \"function\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add the given datacontainer to the view. This is used in the file open method
|
def addDataContainer(self, datacontainer):
if isinstance(datacontainer, DataHandling.DataContainer.DataContainer):
self._datacontainer.append(datacontainer)
if self.show_window:
self.view.addFileToFilelist(datacontainer)
|
[
"def add_container(self, container):\n self.__container_list.append(container)",
"def merge_container(self, container):\n logger.debug('Merging containers')\n print(type(self))\n\n self._add_to_container(\n container.data,\n container.electrode_positions, container.topography)",
"def replaceDataContainer(self, index, datacontainer):\n \n if (isinstance(datacontainer, DataHandling.DataContainer.DataContainer) and\n index >= 0 and\n index < len(self._datacontainer)):\n self._datacontainer[index] = datacontainer\n \n if self.show_window:\n self.view.replaceDataContainerWidget(index, datacontainer)\n \n return True\n else:\n return False",
"async def containeradd(self, ctx, *, input_data: str):\n\t\ttry:\n\t\t\tname, data = input_data.split(';',1)\n\t\texcept IndexError:\n\t\t\tawait self.bot.say(\"Plz format as !container add name;data (data in JSON format)\")\n\t\t\treturn\n\t\ttry:\n\t\t\tself.containers[name] = json.loads(data)\n\t\texcept ValueError:\n\t\t\tawait self.bot.say(\"Error in reading the JSON format\")\n\t\t\treturn\n\t\tself.save_containers()\n\t\tawait self.bot.say(\"Data added\")",
"def containerView(itemList=bool, itemInfo=\"string\", viewDescription=bool, viewList=bool, viewLabel=bool, viewName=\"string\"):\n pass",
"def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_stimulus(self.optical_series)",
"def addListData(tag, dataType, numEle, data):\n api_data_, api_data_n_ = _ivectordouble(data)\n ierr = c_int()\n lib.gmshViewAddListData(\n c_int(tag),\n c_char_p(dataType.encode()),\n c_int(numEle),\n api_data_, api_data_n_,\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshViewAddListData returned non-zero error code: \",\n ierr.value)",
"def setContainer(self, container: 'ScXMLObject') -> \"void\":\n return _coin.ScXMLDataObj_setContainer(self, container)",
"def __add_data(self, data: TextFile):\n if self._data_list.count(data) > 0: return\n\n self.insertItem(len(self._data_list) - 1, str(data))\n self._data_list.append(data)",
"def add_dataset(self, container):\n if not isinstance(container, ABCLattice):\n message = 'Only lattice containers are supported in JYULBEngine'\n raise TypeError(message)\n if bool(self._proxy_lattice):\n message = 'A lattice container already exists in JYULBEngine'\n raise ValueError(message)\n lat_type = container.primitive_cell.bravais_lattice\n if lat_type is not BravaisLattice.CUBIC:\n message = 'Lattice type is not cubic'\n raise ValueError(message)\n\n pylat = PyLattice(np.array(container.size, dtype=np.uint32),\n np.array(container.origin, dtype=np.float64))\n self._pygeom = PyGeometry(pylat)\n\n for node in container.iter_nodes():\n ijk = np.array(node.index, dtype=np.uint32)\n self._pygeom.set_material_ijk(ijk, node.data[CUBA.MATERIAL_ID])\n\n self._prms.time_step = self.CM[CUBA.TIME_STEP]\n\n rden = self.SP[CUBAExtension.REFERENCE_DENSITY]\n self._prms.reference_density = rden\n\n self._prms.kinematic_viscosity = self.SP[CUBA.KINEMATIC_VISCOSITY]\n\n grav = np.array(self.SP[CUBAExtension.GRAVITY], dtype=np.float64)\n self._prms.gravity = grav\n\n self._prms.flow_type = self.SP[CUBAExtension.FLOW_TYPE]\n\n coll_op = self.CM[CUBAExtension.COLLISION_OPERATOR]\n self._prms.collision_operator = coll_op\n\n self._prms.external_forcing = True\n\n self._solver = solver.PySolver(self._pygeom, self._prms)\n pyfdata = self._solver.get_field_data()\n\n name = container.name\n pc = container.primitive_cell\n self._proxy_lattice = ProxyLattice(name, pc, self._pygeom, pyfdata)\n\n self._proxy_lattice.update_nodes(container.iter_nodes())\n\n self._proxy_lattice.data = container.data",
"def add_data_item(self, **data_item_attributes):\n self.add_leaf(DataItem(**data_item_attributes))",
"def addedDataObject(ob, event):\n log.info('Added data object')\n ob.index_object()",
"def add_data(self, data):\n self.label_columns.add_data(data)\n self.errors_description.add_data(data)",
"def view(self, view):\n self.view.append(view)",
"def data_view_setup(self):\n \n #\n #-------------------------- Save Scene Button -------------------------\n #\n \n pass",
"def add_data_field(self, data_field):\n assert isinstance(data_field, DataField), \"Only objects of type SnowLibrary.keywords.file_creator.DataField can be added. Instead got: {}\".format(type(data_field))\n self.fields.append(data_field)\n self.field_count += 1\n if not self._has_fields:\n self._has_fields = True",
"def build(self, data_dict):\n # The widgets are part of every instance\n self.ques_image.source = data_dict[\"image\"]\n self.header_label.text = data_dict[\"header\"]\n # But this content is generated dynamically\n self.box_container.add_widget(self.get_content(data_dict))",
"def ModifyContainer(self, container):",
"def addFile(fig, canvas):\n data = readData()\n file_path = tkinter.filedialog.askopenfilename()\n if not file_path:\n pass\n else:\n data.append((file_path, openChrom(file_path)))\n fig.clear()\n axes = fig.add_subplot(111)\n for i in data:\n x_array, y_array = list(zip(*i[1]))\n axes.plot(x_array, y_array, label=str(os.path.split(i[0])[-1]))\n axes.legend()\n canvas.draw()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all the datacontainers that are currently loaded. Returns a list of the datacontainers.
|
def getDataContainerList(self):
return self._datacontainer
|
[
"def list_containers(self):\r\n return list(self.iterate_containers())",
"def containers():",
"def containers(self):\n return SystemCommand(self.cmd.containers).output",
"def get_all_containers(client):\n return [client.inspect_container(container[\"Id\"])\n for container in client.containers(all=True)]",
"def take_containers_stats(self):\n\t\treturn self.stats_colector.collect_stats()",
"def listDatasets():\n return ee_extra.STAC.core.listDatasets()",
"def docker_container_list(self):\n containers = Container.objects()\n if len(containers) == 0:\n print(\"No containers exist\")\n return\n\n print(\"Name\\t\\tStatus\")\n for container in containers:\n print(container.containerName + \"\\t\\t\" + container.containerStatus)",
"def list_containers_info(self, limit=None, marker=None, **parms):\r\n hdrs, conts = self.connection.get_container(\"\", limit=limit,\r\n marker=marker)\r\n return conts",
"def list_container_objects(self, container):\r\n return list(self.iterate_container_objects(container))",
"def get_containers():\n cmd = \"docker ps -a\"\n res = Popen(split(cmd),\n stdout=PIPE).communicate()[0].decode().split('\\n')[1:-1]\n names = [i.split()[-1] for i in res]\n names = [i for ext in CONTAINER_EXT_TO_BACKUP for i in names\n if ext in i and i not in EXCLUDED_VOLUMES]\n return names",
"def list_container_meta(self, user, account, container, domain,\n until=None):\n return []",
"def list_public_containers(self):\r\n response = self.connection.cdn_request(\"GET\", [\"\"])\r\n status = response.status\r\n if not 200 <= status < 300:\r\n raise exc.CDNFailed(\"Bad response: (%s) %s\" % (status,\r\n response.reason))\r\n return response.read().splitlines()",
"def all_containers(self):\n containers = dict()\n for in_artifact, out_artifact in self.all_artifacts():\n if isinstance(in_artifact, Aliquot) and isinstance(out_artifact, Aliquot):\n if in_artifact.container and out_artifact.container:\n key = in_artifact.container.id + \"|\" + out_artifact.container.id\n if key not in containers:\n containers[key] = (in_artifact.container, out_artifact.container)\n return list(containers.values())",
"def query_all_objects( self ):\n return self._k8s.query_daemonsets( filter=self._filter )",
"def get_datasets(self):\n # type: () -> List[Any]\n self.cursor.execute(\"SELECT id, name, directory FROM datasets\")\n datasets = []\n for d_id, name, directory in self.cursor.fetchall():\n datasets.append({'id': d_id, 'name': name, 'directory': directory})\n return datasets",
"def container_list(self, token, all_containers=False):\n path = \"/ps\"\n job_info = self._get_job_info()\n token_file = self._get_token_file(job_info[\"home\"],\n job_info['job_id'])\n token = token_parse(token, token_file)\n parameters = {\"token\": token, \"all\": all_containers}\n results = self.control.execute_get(path=path, parameters=parameters)\n\n return results",
"def list_datasets():\n # TODO: Query datasets in database\n return []",
"def list_containers(self, do_print=False):\n\t\tgenerator = self.blob_service.list_containers()\n\t\tif do_print:\n\t\t\tprint 'Azure account \\'%s\\' containers list :' % self.ACCOUNT_LOGIN\n\t\t\tfor container in generator:\n\t\t\t\tprint container.name\n\t\treturn generator",
"def get_all_collections(self):\n return self.client.get(\"/collections\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Replace the datacontainer at the given index.
|
def replaceDataContainer(self, index, datacontainer):
if (isinstance(datacontainer, DataHandling.DataContainer.DataContainer) and
index >= 0 and
index < len(self._datacontainer)):
self._datacontainer[index] = datacontainer
if self.show_window:
self.view.replaceDataContainerWidget(index, datacontainer)
return True
else:
return False
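
A brief usage note (a hedged sketch: `view` and `dc` are hypothetical stand-ins for the owning object and a valid DataHandling.DataContainer.DataContainer, not names from the source):

view.replaceDataContainer(0, dc)    # -> True: slot 0 is swapped, widget refreshed if the window is shown
view.replaceDataContainer(99, dc)   # -> False: index out of range, nothing changes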
|
[
"def replace(self, index: int, dataset: Optional[_TypeMultiBlockLeaf]) -> None:\n name = self.get_block_name(index)\n self[index] = dataset\n self.set_block_name(index, name)",
"def put(self, index: int, value: Any):\n self[index] = value",
"def __setitem__(\n self,\n index,\n data,\n ):\n i: int = 0\n name: Optional[str] = None\n if isinstance(index, str):\n try:\n i = self.get_index_by_name(index)\n except KeyError:\n self.append(data, index)\n return\n name = index\n elif isinstance(index, slice):\n index_iter = range(self.n_blocks)[index]\n for i, (idx, d) in enumerate(zip_longest(index_iter, data)):\n if idx is None:\n self.insert(\n index_iter[-1] + 1 + (i - len(index_iter)), d\n ) # insert after last entry, increasing\n elif d is None:\n del self[index_iter[-1] + 1] # delete next entry\n else:\n self[idx] = d #\n return\n else:\n i = index\n\n # data, i, and name are a single value now\n if data is not None and not is_pyvista_dataset(data):\n data = wrap(data)\n data = cast(pyvista.DataSet, data)\n\n i = range(self.n_blocks)[i]\n\n # this is the only spot in the class where we actually add\n # data to the MultiBlock\n\n # check if we are overwriting a block\n existing_dataset = self.GetBlock(i)\n if existing_dataset is not None:\n self._remove_ref(i)\n self.SetBlock(i, data)\n if data is not None:\n self._refs[data.memory_address] = data\n\n if name is None:\n name = f'Block-{i:02}'\n self.set_block_name(i, name) # Note that this calls self.Modified()",
"def replace_index(x, index, value):\n # assume x has a copy-constructor and can be interpreted as a list\n y = list(x)\n y[index] = value\n cctor = copy_constructor(x)\n result = cctor(y)\n return result",
"def __setitem__(self, index, value):\n self.setValue(value, index)",
"def write(self, index, data):\n self.read(index) # refresh self.data (aka full_data)\n self.data[index] = data\n \n with open(self.storage_path, 'wb') as handle:\n pickle.dump(self.data, handle)",
"def set_item(self, index, new_item):\n row = index.row() if hasattr(index, \"row\") else index\n self.collection[row] = new_item\n self.dataChanged.emit(self.index(\n row, 0), self.index(row, self.rowCount() - 1))",
"def replace(self, index, element):\n if index < 0 or index > self.size - 1:\n return False\n \n trav = self.head\n while index > 0:\n index -= 1\n trav = trav.next\n trav.value = element\n \n return True",
"def replaceIndex(self, parent: 'SoNode', index: 'int const', newchild: 'SoNode') -> \"void\":\n return _coin.SoPath_replaceIndex(self, parent, index, newchild)",
"def pop_data(index):\n\n\t\tself.db.loc[index, 'data'].pop(-1)",
"def test_setitem_out_of_range(self, index):\n ds = DatasetList([0])\n\n with pytest.raises(IndexError):\n ds[index] = 1",
"def insert_at_index(self, index: int, value: object) -> None:\n # check to see if the array is full, if it is double\n if self.size == self.capacity: \n self.resize(self.capacity * 2)\n # raise exception if index is invalid\n if (index < 0) or (index > self.size):\n raise DynamicArrayException\n # move all elements right starting from back and working towards index\n for i in range(self.size, index, -1): \n self.data[i] = self.data[i - 1]\n # after, place the value at the specified index\n self.data[index] = value\n # add to the size of da\n self.size += 1",
"def db_save_index(self, index_data):\n self.db[COLLECTION_INDEX_DATA].create_index([('name', pymongo.ASCENDING)])\n self.db[COLLECTION_INDEX_DATA].replace_one({'name': index_data['name']}, index_data, upsert=True)",
"def set_data(self, data, index=None):\n if data is not None:\n data = self.data_class(data)\n logger.info(\"Length of supplied data: %d\", data.ndata)\n self.check_data(data)\n self._data = data\n if index is not None:\n if len(index) != data.ndata:\n raise AttributeError(\"Length of index vector must match number of input data\")\n else:\n index = np.arange(data.ndata)\n self.index = index\n logger.info(\"self.ndata: %d\", self.ndata)",
"def swap(self, index):\n self.__cpu.memory.stack.swap(index)",
"def fix(self, index: int, game: Game) -> None:\n self.store.fix(index, game)",
"def replaceData(self, offset: int, count: int, data):\n self.args[0] = self.args[0][:offset] + data + self.args[0][offset + count:]\n return self.args[0]",
"def _update_index(datatable, index, old_index=None):\n _check_index(datatable._dataframe, index)\n if old_index is not None:\n datatable._update_columns({old_index: datatable.columns[old_index].remove_semantic_tags('index')})\n datatable.columns[index]._set_as_index()",
"def set(self, index: 'int const', node: 'SoNode') -> \"void\":\n return _coin.SoChildList_set(self, index, node)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the datapoint of the background_datacontainer at the given index
|
def _getBackgroundDataPoint(self, background_datacontainer, index):
if isinstance(background_datacontainer, DataHandling.DataContainer.DataContainer):
return background_datacontainer.datapoints[index]
elif isinstance(background_datacontainer, (list, tuple)):
return background_datacontainer[index]
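
A short behavioural sketch (hedged: `view` and `dc` are hypothetical stand-ins, not names from the source):

view._getBackgroundDataPoint(dc, 3)            # DataContainer -> dc.datapoints[3]
view._getBackgroundDataPoint([10, 20, 30], 1)  # plain list/tuple -> 20
view._getBackgroundDataPoint("something", 0)   # any other type -> None (implicit fall-through)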
|
[
"def datum(self, *index):\n data = self.get_data(None)\n if data is None:\n raise ValueError(\n \"ERROR: Can't return an element when there is no data array\"\n )\n\n return data.datum(*index)",
"def get_data_point_info(self, data_info, index):\n\t\tfor data_point in data_info:\n\t\t\tif data_point[0] == index:\n\t\t\t\treturn data_point",
"def value_at(self, index):\n index = np.where(self.indices == index)[0]\n return self.data[index] if index.size != 0 else 0",
"def get_sample(self, index):\n assert isinstance(index, int)\n\n return self.dataset[:, index].reshape(settings.IMAGE_WIDTH, settings.IMAGE_HEIGHT)",
"def __getitem__(self, index: int) -> List[object]:\n return [d[index] for d in self.datasets]",
"def __getitem__(self, ind):\n if isinstance(ind, slice):\n return self.TAA[ind]\n else:\n return self.TAA[ind, 0]",
"def __getitem__(self, index):\n if isinstance(index, int):\n return self.__savepoint_list[index]\n return self.__make_savepoint_collection(index, True)",
"def get_by_index(self, index):\n return self.tile_list[index]",
"def __getitem__(self, i):\n return self._data[i]",
"def point(self, indx=0):\n if indx > self._npts - 1:\n return self._results[-1][1]\n return self._results[indx][1]",
"def get_at_index(self, idx):\n return self._get_node_at_index(idx).data",
"def __getitem__(self, idx):\n if isinstance(idx, int):\n batch = [self.dataset[idx]]\n else:\n batch = self.dataset[idx]\n\n images, targets = zip(*batch)\n\n if self.no_background:\n\n images = [process_img(i) for i in images]\n\n else:\n n_trials = len(images)\n n_back = len(self.backgrounds)\n back_inds = np.random.choice(n_back, n_trials)\n backgrounds = self.backgrounds[back_inds]\n\n images = [self.overlay(back, img)\n for back,img in zip(backgrounds, images)]\n\n if self.vae:\n targets = [resize(i, (64,64)) for i in images]\n\n pairs = list(zip(images, targets))\n if isinstance(idx, int):\n pairs = pairs[0]\n return pairs",
"def get_sample(self, index):\n assert isinstance(index, int)\n\n img = Image.open(self.dataset[index])\n\n return np.asarray(img.convert('L')) if img.getbands() != ('L', ) else np.asarray(img)",
"def get_qpimage(self, idx):\n # raw data\n qpi = self.get_qpimage_raw(idx)\n if \"identifier\" not in qpi:\n msg = \"`get_qpimage_raw` does not set 'identifier' \" \\\n + \"in class '{}'!\".format(self.__class__)\n raise KeyError(msg)\n # bg data\n if self._bgdata:\n if len(self._bgdata) == 1:\n # One background for all\n bgidx = 0\n else:\n bgidx = idx\n\n if isinstance(self._bgdata, SeriesData):\n # `get_qpimage` does take `idx`\n bg = self._bgdata.get_qpimage_raw(bgidx)\n else:\n # `self._bgdata` is a QPImage\n bg = self._bgdata[bgidx]\n qpi.set_bg_data(bg_data=bg)\n return qpi",
"def get_example(self, i):\n key = str(i)\n if key not in self.cache:\n self.cache[key] = self._dataset[i]\n return self.cache[key]",
"def get_instance(self, index):\n return self.instances[index]",
"def __getitem__ ( self , index ) :\n return self._histos[ index ]",
"def current_data_slice(self):\n return self.get_data_slice(self.i)",
"def get_byindex(self, index):\n return self.dict_pref[self.polygon_objs[index]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a human-readable string of the indices in the list_data. The list_data is assumed to be a list full of integers. This function will collapse consecutive indices into start:end ranges, separated by ";".
|
def zipIndices(self, list_data):
# sort the list
list_data.sort()
# the return value
zip_indices = ""
# the last element in the list_data
last_element = None
# the index of the list_data when an element had been added
added_i = None
for i, element in enumerate(list_data):
# check if the last element is the current element - 1, if it is
# this will be in the range
if last_element != None and last_element + 1 == element:
last_element = element
continue
else:
# element is not in the range, check if the last element exists
if last_element != None:
# if it exists check if the last element has been added
# already, if not this is a range so end the range
if added_i != i -1:
zip_indices += ":" + str(last_element)
# indicate that a new element is starting
zip_indices += ";"
# add the current element
zip_indices += str(element)
# save the added index
added_i = i
# save the element as the last element for the next iteration
last_element = element
    # close a trailing range that has not been ended yet; this also covers the
    # case where the list_data is a range from the first element to the last
    if last_element != None and added_i != len(list_data) - 1:
        zip_indices += ":" + str(last_element)
return zip_indices
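
For illustration, the expected output format (a hedged sketch: `controller` is a stand-in for whatever object owns this method, not a name from the source):

controller.zipIndices([3, 4, 5, 9])   # -> "3:5;9"  consecutive runs collapse to "start:end", ";" separates entries
controller.zipIndices([0, 1, 2])      # -> "0:2"
controller.zipIndices([4])            # -> "4"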
|
[
"def print_indexes(my_list):\n\n # for i in range(len(my_list)):\n # print \"{} {}\".format(i, my_list[i]) \n\n # return\n\n # learned 'enumerate' function, i is the index position and item is the \n # element in that position in the list\n # print both of those elemetns\n\n for i, item in enumerate(my_list):\n print i, item\n return",
"def numbered(lst: List[Any]) -> List[str]:\n return [f\"{i} - {a}\" for i, a in enumerate(lst)]",
"def print_indices_text(lines_array, the_indexes, message=''):\n # print(f'found this {message} at indexes {the_indexes}')\n print(\"\\n\".join(['[{:5}] {}'.format(i, lines_array[i]) for i in the_indexes]))",
"def get_base_64_index_string(self):\n packed_indices = ''\n for tile_index in self.tile_indices:\n packed_indices += struct.pack(\"<L\", tile_index)\n\n return packed_indices.encode('base64')",
"def _getHTMLAboutIndexes(self, indexes):\n s = ''\n prev = None\n for idx in indexes:\n if prev is None:\n s = '<b>%s</b>'%idx\n prev = idx\n elif idx != prev:\n if prev-idx < 0:\n s += ', %s'%(prev-idx)\n else:\n s += ', +%s'%(prev-idx)\n prev = idx\n s += ', <i>%s</i>'%indexes[-1]\n return s",
"def print_queue_index_list(self):\n result = []\n for val in self.queue:\n result.append(str(val[1].get_index()).ljust(4))\n print \"[debug] PIFO_Queue: print_queue_index_list \".ljust(80) + str(result)",
"def _list_2d_to_str(list_2d):\n output_str = ''\n for idx2d, l in enumerate(list_2d):\n for idx, val in enumerate(l):\n output_str += str(val)\n if idx < len(l) - 1:\n output_str += ' '\n\n if idx2d < len(list_2d) - 1:\n output_str += '_'\n return output_str",
"def to_pepidx_str(self):\n s = ''\n s += self.sequence + '\\t'\n s += self.ptm_string + '\\t'\n s += repr(self.binindex) + '\\n'\n return s",
"def __get_index(data):\n return list(range(len(data)))",
"def getId(l: list) -> str:\n h = \"0x\"+\"\".join(l)\n i = int(h, 16)\n return f\"{h} ({str(i)})\"",
"def inverted_index_template(terms_list: list) -> list:\r\n inverted_index = []\r\n for i in range(len(terms_list)):\r\n inverted_index.append((i + 1, 0, 0, []))\r\n return inverted_index",
"def idx2str(self, indexes):\n assert isinstance(indexes, list)\n strings = []\n for index in indexes:\n string = [self.idx2char[i] for i in index]\n # use ',' to join char list.\n string = ','.join(string)\n strings.append(string)\n\n return strings",
"def get_text(self):\n self.sorted_indices = []\n self.sen_instIdx_map = []\n # weather_summary = self.get_weather_summary()\n weather_singles = self.get_weather_singles()\n ground_items = self.get_ground_items()\n grass_road_text = self.get_grass_road_text()\n\n texts = [weather_singles, ground_items, grass_road_text]\n return \" \".join([text for text in texts if text != \"\"]), self.sorted_indices, self.sen_instIdx_map",
"def convertIndexListtoHeading(self, list):\n headings = []\n for i in list:\n i = int(i)\n headings.append(self.potentialHeadings[i])\n return headings",
"def _repr_label_vector_description(self, label_vector_indices):\n\n string = [\"1\"]\n for cross_terms in label_vector_indices:\n sub_string = []\n for descr, order in cross_terms:\n if order > 1:\n sub_string.append(\"{0}^{1}\".format(descr, order))\n else:\n sub_string.append(descr)\n\n if len(sub_string) > 1:\n string.append(\"({})\".format(\" * \".join(sub_string)))\n else:\n string.append(sub_string[0])\n\n return \" + \".join(string)",
"def prettyPrintList(title, listToPrint, indexOffSet = 0):\n print(title)\n for index,listElement in enumerate(listToPrint):\n print(\" [ {:3} ] - {}\".format(index+indexOffSet,listElement))",
"def _index_label(self, label: Any) -> List[int]:\n raise NotImplementedError",
"def convertHeadingListtoIndex(self, list):\n headingIndices = []\n for i in list:\n i = int(i)\n headingIndices.append(self.potentialHeadings.index(i))\n return headingIndices",
"def printing_menu_index(choices_list):\n menu = TerminalMenu(choices_list)\n choice_index = menu.show()\n return choice_index"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a list which contains all the indices in the given indices_string. This is the inverse of the GraphWizard.zipIndices method.
|
def unzipIndices(self, indices_string):
space_regexp = re.compile("\\s+")
indices_string = space_regexp.sub("", indices_string)
datapoints = []
# split datapoints by ;
indices_string = indices_string.split(";")
for datapoint_range in indices_string:
# split datapoint ranges by :
datapoint_range = datapoint_range.split(":")
# remove empty or non-strings
datapoint_range = filter(lambda x : my_utilities.is_numeric(x),
datapoint_range)
# convert to int
datapoint_range = list(map(lambda x: int(my_utilities.force_float(x, True)),
datapoint_range))
if len(datapoint_range) == 1:
# only one element so this is a single datapoint index
datapoints.append(my_utilities.force_float(datapoint_range[0]))
elif len(datapoint_range) > 1:
# create range
mi = min(datapoint_range)
ma = max(datapoint_range) + 1
datapoints = datapoints + list(range(mi, ma))
return datapoints
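
A minimal standalone sketch of the parsing that unzipIndices performs; plain int() stands in for the my_utilities helpers, which are not shown in this snippet:

def unzip(indices_string):
    result = []
    for part in indices_string.replace(" ", "").split(";"):
        # keep only numeric tokens, mirroring the is_numeric filter above
        bounds = [int(x) for x in part.split(":") if x.lstrip("-").isdigit()]
        if len(bounds) == 1:
            result.append(bounds[0])
        elif len(bounds) > 1:
            result.extend(range(min(bounds), max(bounds) + 1))
    return result

print(unzip("3:5;9"))    # -> [3, 4, 5, 9]
print(unzip(" 0 : 2 "))  # -> [0, 1, 2]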
|
[
"def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]",
"def split_indices(self, indices):\n out_ind = [[] for _ in range(self.num_patitions)]\n for key in indices:\n part = self.get_partition_index(key)\n ind = self.mapping_to_partition[part][key]\n out_ind[part].append(ind)\n return out_ind",
"def enumerate_indices(expr, indices):\r\n\r\n # Make sure indices is a list\r\n if not indices: return expr\r\n if not isinstance(indices, collections.Iterable): indices = [indices]\r\n\r\n all_expr = [expr]\r\n indices = list(indices) # To allow for popping below\r\n\r\n # For every index\r\n while indices:\r\n index = indices.pop()\r\n\r\n # For every previously added expression\r\n num_expr = len(all_expr)\r\n for i in range(0,num_expr):\r\n expr = all_expr.pop(0)\r\n # Substitute \r\n new_exprs = [expr.subs(index, val) for val in range(index.lower, index.upper+1)]\r\n # Add only if doesnt exist yet\r\n [all_expr.append(new) for new in new_exprs if new not in all_expr]\r\n\r\n return all_expr",
"def build_inverted_index(self):\n list_count = len(self.unique_labels)\n self.inv_idx = [] # inverted index is a list of lists\n for i in range(list_count): # create the index structure\n self.inv_idx.append([])\n\n for k, v in self.data_dict.items(): # k: filename, v: label\n v = int(v)\n k = str(k)\n self.inv_idx[v].append(k)\n\n for i in range(len(self.inv_idx)): # shuffle the indexes for randomness in multiple epochs\n np.random.shuffle(self.inv_idx[i])\n self.inv_idx[i].append('monkey') # monkey is a sentinel value that marks the end of a list",
"def _get_indices(self, n_indices):\n raise NotImplementedError",
"def uvIndices(*args, **kwargs):\n \n pass",
"def decode_indices(indices, vocabulary):\n\n decoded_tokens = [vocabulary[index] for index in indices]\n return \" \".join(decoded_tokens)",
"def words2indices(self, words):\n return [self.word2index(w) for w in words]",
"def torch_indices_to_tokens(self, indices: torch.LongTensor) -> np.array:\n pass",
"def extract_indices(indices, start_index = 0, stepsize = 1, length = 2):\n samples = np.arange(start_index, length, stepsize).astype('int')\n return np.setdiff1d(indices, samples)",
"def get_inverse_indices(n, indices):\n if indices.max() >= n:\n raise ValueError(\"Indices contain values that are out of range for n\")\n inv = np.arange(n, dtype=int)\n return inv[~np.in1d(inv, indices)]",
"def get_indices(mnemonic):\n if len(mnemonic) == 0:\n raise ValueError\n return [get_index_from_word(word) for word in mnemonic.split()]",
"def imdb2indices(inputs):\n X = [] # results\n word2index = imdb.get_word_index()\n word2index = {k:(v+3) for k,v in word2index.items()}\n word2index[\"<PAD>\"], word2index[\"<START>\"], word2index[\"<UNK>\"], word2index[\"<UNUSED>\"] = 0,1,2,3\n for input_ in inputs:\n X.append([])\n for word in input_:\n idx = word2index.get(word, word2index[\"<UNK>\"])\n X[-1].append(idx)\n return X",
"def create_backing_indices():\n conn = get_conn(verify=False)\n new_backing_public_index = make_backing_index_name()\n new_backing_private_index = make_backing_index_name()\n new_backing_percolate_index = make_backing_index_name()\n backing_index_tuples = [\n (new_backing_public_index, PUBLIC_ENROLLMENT_INDEX_TYPE),\n (new_backing_private_index, PRIVATE_ENROLLMENT_INDEX_TYPE),\n (new_backing_percolate_index, PERCOLATE_INDEX_TYPE),\n ]\n for backing_index, index_type in backing_index_tuples:\n # Clear away temp alias so we can reuse it, and create mappings\n clear_and_create_index(backing_index, index_type=index_type)\n temp_alias = make_alias_name(index_type, is_reindexing=True)\n if conn.indices.exists_alias(name=temp_alias):\n # Deletes both alias and backing indexes\n conn.indices.delete_alias(index=INDEX_WILDCARD, name=temp_alias)\n\n # Point temp_alias toward new backing index\n conn.indices.put_alias(index=backing_index, name=temp_alias)\n\n return backing_index_tuples",
"def getIndexList():\n response = es.cat.indices(index=indexFilter).split()\n indiciesList = getIndicies(response)\n return indiciesList",
"def returnIteratorIndexesFromIndex(cls, listOfIndexes):",
"def torch_indices_to_tokens(self, indices: torch.LongTensor) -> np.array:\n return self.itos[indices.numpy()]",
"def gen_indices(self):\n q_indices = [x for x in range(self.q)]\n #shuffle indices and zip them with themselves displaced by 1\n np.random.shuffle(q_indices)\n return list(zip(q_indices, q_indices[1:] + [q_indices[0]]))",
"def tokens_to_indices(self, tokens):\n return([self.__getindex__(t) for t in tokens])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the full index range of the given iterable. This will return a zip string, as produced by Controller.zipIndices(), covering every index of the given iterable.
|
def zipFullRange(self, iterable):
try:
l = len(iterable)
except TypeError as e:
return ""
return self.zipIndices(list(range(0, l)))
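
Expected behaviour, for illustration (hedged: `controller` is a hypothetical owner of the method):

controller.zipFullRange("abcde")   # -> "0:4"  (indices 0 through len - 1)
controller.zipFullRange(42)        # -> ""     (len() raises TypeError, which is swallowed)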
|
[
"def get_slice(seq,start=0,stop=None,step=1):\n if stop == None:\n stop = len(seq)\n item = lambda i: seq[i]\n return map(item,xrange(start,stop,step))",
"def list(self):\n return range(*self._vec)",
"def Slice(iterable, start=None, *args, **kwargs):\n return itt.islice(iterable, start, *args, **kwargs)",
"def _rangeify(self, slice):\n start, stop, step = slice.start, slice.stop, slice.step\n if step is None:\n step = 1\n if start is None and step >= 0:\n start = 0\n elif start is None and step < 0:\n start = -1\n if stop is None and step >= 0:\n stop = self.__len__()\n elif stop is None and step < 0:\n stop = -self.__len__() - 1\n return range(start, stop, step)",
"def get_range(self) -> str:\n pass",
"def location_range(start: int, end: int) -> Iterable[int]:\n step = 1\n if start > end:\n step = -1\n\n return range(start, end + step, step)",
"def get_chunk_range(selection, chunks):\n chunk_range = [range(s.start//l, int(np.ceil(s.stop/l)))\n if isinstance(s, slice)\n else range(s//l, (s//l)+1)\n for s, l in zip(selection, chunks)]\n return chunk_range",
"def _expand_range(tuple_or_list):\n return(\n list(\n range(\n tuple_or_list[0],\n tuple_or_list[1] + 1) if all([\n isinstance(tuple_or_list, tuple),\n len(tuple_or_list)==2,\n *[isinstance(v, int) for v in tuple_or_list]\n ]) else tuple_or_list\n )\n )",
"def get_slice(self, zcoords):\n\n start_ind = np.searchsorted(zcoords, self.start)\n end_ind = np.searchsorted(zcoords, self.end)\n return (slice(start_ind, end_ind), ...)",
"def _range_iter_ ( self ) :\n _r = self\n _num = _r.size()\n _i = 0\n while _i < _num :\n yield _r.at(_i)\n _i += 1",
"def slices(n, max_len: int = None) -> iter:\n return chain(\n ((0, 0), ), # Empty word\n (\n (i, j)\n for i in range(n)\n for j in range(i + 1, min(n, i + max_len) + 1 if max_len else n + 1)\n )\n )",
"def well_indexes_from(self, start, num, columnwise=False): \n \n container_type = _CONTAINER_TYPES[self.container_type_id]\n \n start = container_type.robotize(start)\n \n if columnwise:\n row, col = container_type.decompose(start)\n num_rows = self.row_count\n start = col * num_rows + row \n \n \n return range(start,start + num)",
"def reverse_enumerate(iterable):\n return izip(reversed(range(len(iterable))), reversed(iterable))",
"def squash_int_range(cls, ilist):\n irange = []\n rstart = None\n rprev = None\n\n sorted(ilist)\n for i, value in enumerate(ilist):\n if rstart is None:\n if i == (len(ilist) - 1):\n irange.append(value)\n break\n\n rstart = value\n\n if rprev is not None:\n if rprev != (value - 1):\n if rstart == rprev:\n irange.append(rstart)\n else:\n irange.append(\"{}-{}\".format(rstart, rprev))\n if i == (len(ilist) - 1):\n irange.append(value)\n\n rstart = value\n elif i == (len(ilist) - 1):\n irange.append(\"{}-{}\".format(rstart, value))\n break\n\n rprev = value\n\n return irange",
"def index(self, x, start = 0, end=None):",
"def extract_slices_range(split, next_read_index, Y_size, Z_size):\n indexes = []\n x_index_min = -1\n read_start, read_end = next_read_index\n for i in range(0, split.split_x):\n index = int(split.split_pos[-3]) + (int(split.split_pos[-2])) * Y_size + (int(\n split.split_pos[-1]) + i) * Y_size * Z_size\n # if split's one row is in the write range.\n if index >= read_start and index <= read_end:\n if len(indexes) == 0:\n x_index_min = i\n indexes.append(index)\n else:\n continue\n\n X_index_min = index_to_voxel(min(indexes), Y_size, Z_size)[2]\n X_index_max = index_to_voxel(max(indexes), Y_size, Z_size)[2]\n x_index_max = x_index_min + (X_index_max - X_index_min)\n\n return (X_index_min, X_index_max, x_index_min, x_index_max)",
"def get_ranges(self):\r\n pass",
"def extend(self, iterable):\r\n self._ranges = RangeSet._merge_ranges(\r\n self._ranges + (Range(r) for r in iterable)\r\n )",
"def getIndexRange(self) -> Tuple[int, int]:\n return (self.__beginIndex, self.__endIndex)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Removes all the list elements in the list_data that are the same. This supports only numeric lists. The threshold can be a numeric value which defines how much the list_data's elements can differ from the others and still count as the "same value".
|
def uniqueListThreshold(self, list_data, threshold):
# prepare variables
uniques = []
full_list = {}
# go through each list element
for x in list_data:
# tells whether the element has been found or not
found = False
# go through the existing elements in the uniques list
for u in uniques:
# check if the current list_datas element is in the range
# of the current unique value
if u - threshold < x and u + threshold > x:
found = True
full_list[u].append(x)
                break
# if the element is not in the uniques add it as a new unique
if not found:
full_list[x] = [x]
uniques.append(x)
return uniques, full_list
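
A worked example of the expected result (hedged: `controller` is again a hypothetical owner of the method):

uniques, groups = controller.uniqueListThreshold([1.0, 1.2, 3.0, 3.1, 10.0], 0.5)
# uniques -> [1.0, 3.0, 10.0]
# groups  -> {1.0: [1.0, 1.2], 3.0: [3.0, 3.1], 10.0: [10.0]}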
|
[
"def remove_list_by_fraction(source_lst):\n\tflags = [0]\n\ttemp = source_lst[0]\n\tfor i in range(len(source_lst)):\n\t\tif source_lst[i] != temp:\n\t\t\tflags.append(i)\n\t\t\ttemp = source_lst[i]\n\n\treturn flags",
"def RemoveSmallRiderCountsForStation(counts, l1):\n try:\n l1p = []\n for l in l1:\n if l[0] > counts:\n l1p.append(l)\n except(Exception) as e:\n print(e)\n return l1p",
"def clean_list(orig_list):\n t_last = None\n eps = 10\n new_list = []\n if len(orig_list) < 2:\n return orig_list\n\n for item in orig_list:\n if t_last is not None:\n if abs((item - t_last).total_seconds()) > eps:\n new_list.append(item)\n else:\n new_list.append(item)\n t_last = item\n return new_list",
"def test_assertFloatEqualList_unequal(self):\n originals = [0, 1, -1, 10, -10, 100, -100]\n modified = [i + 1e-5 for i in originals]\n try:\n self.assertFloatEqual(originals, modified)\n except:\n pass \n else:\n raise AssertionError, \\\n \"unit_test.assertFloatEqual failed on lists of dissimilar values\"",
"def transformList(the_list, threshold):\n result_list = []\n\n for element in the_list:\n if element > threshold:\n result_list.append(1)\n else:\n result_list.append(0)\n\n return result_list",
"def get_unchanged(src_list, npred_dict_new,\n npred_dict_old,\n npred_threshold=1e4,\n frac_threshold=0.9):\n o = []\n for s in src_list:\n npred_new = npred_dict_new[s]\n if npred_new < npred_threshold:\n o += [s]\n continue\n if npred_dict_old is None:\n npred_old = 0.\n else:\n npred_old = npred_dict_old[s]\n frac = npred_old / npred_new\n if frac > frac_threshold:\n o += [s]\n return o",
"def remove_duplicates(self):\n for data_list in self.converted_data:\n for index, item in enumerate(data_list):\n if index != 0:\n date1 = data_list[index - 1][0]\n date2 = data_list[index][0]\n # If dates of two entries in a row are the same\n if date1 == date2:\n # Deleting one of them\n data_list.pop(index)",
"def find_similar (self, iterable, threshold=0.3):\n \n raise NotImplementedError",
"def prune_threshold(self, base_models_scores, threshold=0.55):\n indices = np.argwhere(base_models_scores < threshold)\n for index in sorted(indices.ravel(), reverse=True):\n if len(self.ensemble_) > 1:\n del self.ensemble_[index]\n base_models_scores = np.delete(base_models_scores, index, axis=0)\n # Return reduced scores list\n return base_models_scores",
"def listRemoveElements(data,sel):\r\n for element in sel:\r\n for i in range(len(data)):\r\n if element == data[i]:\r\n data.pop(i)\r\n break;",
"def _unique(self, lst, eq):\n i = 0\n while i < len(lst):\n j = i + 1\n while j < len(lst):\n if eq(lst[i], lst[j]):\n self.logger.warn(\"skipping %s (duplicate of %s)\"\n % (lst[j], lst[i]))\n del lst[j]\n else:\n j = j + 1\n i = i + 1",
"def unique_threshold(a, thres):\n a = np.sort(a)\n diff = np.diff(a, axis=0)\n ui = np.ones(len(a), 'bool')\n ui[1:] = np.abs(diff) > thres\n\n return a[ui]",
"def remove_nan_from_list(self, the_list):\n\t\treturn [value for value in the_list if not self.isNaN(value) ]",
"def _clean_dup_points(self, tol=TOL): \n \n scrubber = vtk.vtkCleanPolyData()\n scrubber.SetTolerance(tol)\n scrubber.SetInputData(self.Surf)\n scrubber.Update()\n \n N1 = self.Surf.GetNumberOfPoints()\n N2 = scrubber.GetOutput().GetNumberOfPoints()\n \n if N2<N1:\n if self.verb: print \"Removed %d duplicate points\" % (N1-N2)\n self.Surf = scrubber.GetOutput()\n self._update_q()\n else:\n if self.verb: print \"No duplicate points within tolerance\"\n\n self._cleaned = True",
"def find_similar_size(clst_list, thresh=2):\n passing = []\n # For each group of overlapping clusters:\n for oc in clst_list:\n # Generate the interval sizes\n sizes = [len(c) for c in oc]\n # Then, calculate all pairwise differences in sizes\n sizediff = []\n for i, x in enumerate(sizes):\n for y in sizes[i+1:]:\n sizediff.append(abs(y - x))\n passthresh = [True if a <= thresh else False for a in sizediff]\n # If any interval passes the wobble filter, keep it\n if any(passthresh):\n passing.append(oc)\n return passing\n\n # Generate the firt cluster",
"def remove_zeros(input_data, minimum=0.002):\n output = []\n\n for d in input_data:\n if d[1] > minimum:\n output.append(d)\n\n return output",
"def remove_above_threshold(thresh, ls, running_ls_thresh=None, i=0):\n # Initialize running_ls_thresh in first pass\n if running_ls_thresh is None:\n running_ls_thresh = []\n\n if i == len(ls):\n return running_ls_thresh\n else:\n if ls[i] <= thresh:\n running_ls_thresh.append(ls[i])\n return remove_above_threshold(thresh, ls, running_ls_thresh, i + 1)",
"def test_streamthreshold_remove(self):\n st1 = StreamThreshold(threshold=10, width=1000, depth=5)\n self.assertEqual(st1.add(\"this is a test\", 5), 5)\n self.assertEqual(st1.meets_threshold, dict())\n self.assertEqual(st1.add(\"this is a test\", 5), 10)\n self.assertEqual(st1.meets_threshold, {\"this is a test\": 10})\n self.assertEqual(st1.add(\"this is not a test\", 9), 9)\n self.assertEqual(st1.meets_threshold, {\"this is a test\": 10})\n self.assertEqual(st1.add(\"this is a test\", 20), 30)\n self.assertEqual(st1.meets_threshold, {\"this is a test\": 30})\n self.assertEqual(st1.add(\"this is not a test\", 2), 11)\n self.assertEqual(st1.meets_threshold, {\"this is a test\": 30, \"this is not a test\": 11})\n self.assertEqual(st1.remove(\"this is a test\"), 29)\n self.assertEqual(st1.meets_threshold, {\"this is a test\": 29, \"this is not a test\": 11})\n self.assertEqual(st1.remove(\"this is not a test\"), 10)\n self.assertEqual(st1.remove(\"this is not a test\"), 9)\n self.assertEqual(st1.meets_threshold, {\"this is a test\": 29})\n\n self.assertEqual(st1.elements_added, 38)",
"def prune(self, threshold=1e-3):\n\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a new background for the given datacontainer file by interpolating the background_datacontainer. The measurement_type tells whether the file is a M(T) or a M(H) measurement. The filepath is the target filepath where the generated background is saved.
|
def createNewBackground(self, datacontainer, background_datacontainer, measurement_type, filepath = None):
if not isinstance(filepath, str):
filepath = "<temporary created background>"
return DataHandling.calculation.createBackgroundDataContainer(
datacontainer,
background_datacontainer,
measurement_type,
filepath,
self
)
|
[
"def create_backgrounds(\n outdir, background=None, genome=\"hg38\", size=200, custom_background=None\n):\n if background is None:\n background = [\"random\"]\n nr_sequences = {}\n\n # Create background for motif prediction\n if \"gc\" in background:\n pred_bg = \"gc\"\n else:\n pred_bg = background[0]\n\n create_background(\n pred_bg,\n os.path.join(outdir, \"prediction.fa\"),\n os.path.join(outdir, \"prediction.bg.fa\"),\n genome=genome,\n size=size,\n custom_background=custom_background,\n )\n\n # Get background fasta files for statistics\n bg_info = {}\n nr_sequences = {}\n for bg in background:\n fname = os.path.join(outdir, f\"bg.{bg}.fa\")\n nr_sequences[bg] = create_background(\n bg,\n os.path.join(outdir, \"validation.fa\"),\n fname,\n genome=genome,\n size=size,\n custom_background=custom_background,\n )\n\n bg_info[bg] = fname\n return bg_info",
"def read_background_from_file(dataset: 'TextDataset', filename: str, background_class):\n new_instances = OrderedDict()\n for instance in dataset.instances:\n background_instance = BackgroundInstance(instance, [])\n new_instances[instance.index] = background_instance\n for line in codecs.open(filename, \"r\", \"utf-8\"):\n fields = line.strip().split(\"\\t\")\n index = int(fields[0])\n if index in new_instances:\n instance = new_instances[index]\n for sequence in fields[1:]:\n instance.background.append(background_class.read_from_line(sequence, None))\n return TextDataset(list(new_instances.values()))",
"def __init__(self, filepath, filetype):\n\n try:\n self.filepath = filepath\n self.name, self.base, self.env, self.light, self.spots = filepath.replace(filetype, '').split('-')\n self.name = self.name.split('/')[-1]\n self.base = float(self.base)\n self.spots = not 'no' in self.spots\n self.z = ImageLayer.FOREGROUND if 'foreground' in filepath else ImageLayer.BACKGROUND\n\n except Exception as e:\n\n print('Error ', e)\n print('on file ', filepath, filetype)\n raise Exception('Error creating the image layer!')",
"def generate_dataset(background_folder, foreground_folder, dest_folder, specs, log_file='dataset_inventory.csv', log_init_rows=0, verbose=2):\n\n background_layers = LayerPicker(background_folder, '.jpg')\n foreground_layers = LayerPicker(foreground_folder, '.png')\n\n logger = CombLogger(log_file, log_init_rows)\n\n for i, back in enumerate(background_layers):\n\n try:\n generate_composition(back, foreground_layers, dest_folder, specs, logger, verbose)\n\n foreground_layers.reset()\n\n print('>>>>>>> Status: {}%'.format((i+1)*100./len(background_layers)))\n\n except Exception as e:\n\n print(str(e))\n print('on line', e.__traceback__.tb_lineno)\n print(e.__traceback__.tb_frame.f_code)\n print('\\nError on background {}'.format(str(back)))\n raise e",
"def background(phot_path):\n df = pines_log_reader(phot_path)\n sources = get_source_names(df)\n if sources[0]+' Background' in df.keys():\n files = np.array(df['Filename'])\n bg = np.array(df[sources[0]+' Background'], dtype=float)\n files = np.array(files[np.where(~np.isnan(bg))])\n bg = np.array(bg[np.where(~np.isnan(bg))], dtype=float)\n \n if len(files) != len(bg):\n return\n \n #Get exptimes \n exptimes = np.zeros(len(files))\n bands = np.zeros(len(files), dtype='object')\n for i in range(len(files)):\n path = phot_path.parent.parent/('reduced/'+files[i])\n hdr = fits.open(path)[0].header\n exptimes[i] = hdr['EXPTIME']\n bands[i] = hdr['FILTNME2']\n\n return bg, exptimes, bands",
"def make_background_file(out_file, exp_time, instrument, sky_center,\n overwrite=False, foreground=True, instr_bkgnd=True,\n ptsrc_bkgnd=True, no_dither=False, dither_params=None,\n subpixel_res=False, input_sources=None, \n absorb_model=\"wabs\", nH=0.05, prng=None):\n prng = parse_prng(prng)\n events, event_params = make_background(exp_time, instrument, sky_center, \n ptsrc_bkgnd=ptsrc_bkgnd, \n foreground=foreground, \n instr_bkgnd=instr_bkgnd,\n no_dither=no_dither,\n dither_params=dither_params, \n subpixel_res=subpixel_res,\n input_sources=input_sources,\n absorb_model=absorb_model,\n nH=nH, prng=prng)\n write_event_file(events, event_params, out_file, overwrite=overwrite)",
"def from_background(species, pup, bg_labeling_params, bg_clips_dir, df_save_dir, df_save_name, inprogress_dir):\n\t\n\t#print pup\n\tprint('\\n################################\\nlabeling background from pup', pup,'\\n################################\\n')\n\t\n\t#get the paths to the clips\n\tclips = [bg_clips_dir+i for i in os.listdir(bg_clips_dir) if i.endswith('.wav') and pup in i]\n\t\n\t#check if there are completed source files for this species, label, and iteration and if so pick up from where you left off\n\t\n\tsource_file_save_path = inprogress_dir+pup+'_bg_completed_source_files'\n\thuman_save_path = inprogress_dir+pup+'_bg_completed_human_labels'\n\tdurations_save_path = inprogress_dir+pup+'_bg_completed_durations'\n\tnewstarts_save_path = inprogress_dir+pup+'_bg_completed_newstarts'\n\tnewstops_save_path = inprogress_dir+pup+'_bg_completed_newstop'\n\n\tif pup in set([i.split('_bg_completed')[0] for i in os.listdir(inprogress_dir)]):\n\t\t\n\t\tprint('loading saved annotations...')\n\t\tdone_source_files = list(np.load(source_file_save_path+'.npy'))\n\t\thuman_labels = list(np.load(human_save_path+'.npy'))\n\t\tdurations = list(np.load(durations_save_path+'.npy'))\n\t\tnewstarts = list(np.load(newstarts_save_path+'.npy'))\n\t\tnewstops = list(np.load(newstops_save_path+'.npy'))\n\n\t\tprint('you already annotated', len(done_source_files), 'clips...amounting to', sum(durations), 'seconds of audio from this pup...\\n')\n\t\t\n\telse:\n\t\tprint('starting from scratch - no previously annotated vocalizations from this sampling params dictionary')\n\t\tdone_source_files = []\n\t\thuman_labels = []\n\t\tdurations = []\n\t\tnewstarts= []\n\t\tnewstops = []\n\t\t\n\t#check if there are other labeling iterations and if so make sure you don't sample the same clips for this one\n\tinprogress_root = os.path.split(inprogress_dir[:-1])[0]\n\tother_iterations = [i for i in os.listdir(inprogress_root) if i!='iteration'+str(bg_labeling_params['sampling_iteration']) and 'iteration' in i]\n\tprint(other_iterations)\n\tother_source_files = []\n\tif len(other_iterations) != 0:\t\n\t\tfor iteration in other_iterations:\n\t\t\tother_pups = [i for i in os.listdir(inprogress_root+'/'+iteration+'/')]\n\t\t\tif pup+'_bg_completed_source_files.npy' in other_pups:\n\t\t\t\tprint('This pup has background clips annotated in another iteration:')\n\t\t\t\tother_source_files_npy = os.path.join(inprogress_root, iteration, pup+'_bg_completed_source_files.npy')\n\t\t\t\tother_source_files.extend(list(np.load(other_source_files_npy)))\n\t\t\t\t\n\tprint(len(other_source_files),'have been annotated from other iterations')\n\t\n\t#get the source files to be processed from this pup and begin - shuffle so that you're not just getting background from the start of recordings\n\tnot_done_source_files = sorted([i.split('/')[-1] for i in clips if i.split('/')[-1] not in done_source_files and i.split('/')[-1] not in other_source_files], key = lambda x: random.random())\n\n\tfor clip in not_done_source_files:\n\t\n\t\t#display cumulative length\n\t\tprint(sum(durations), \"seconds of audio annotated from this pup...\")\n\n\t\t#save so you can recover if interrupted\n\t\tprint('saving progress...\\n')\n\t\tnp.save(file=source_file_save_path, arr=np.array(done_source_files))\n\t\tnp.save(file=human_save_path, arr=np.array(human_labels))\n\t\tnp.save(file=durations_save_path, arr=np.array(durations))\n\t\tnp.save(file=newstarts_save_path, arr=np.array(newstarts))\n\t\tnp.save(file=newstops_save_path, 
arr=np.array(newstops))\n\n\t\t#read it\n\t\tfs, wav = wavfile.read(bg_clips_dir+clip)\n\t\t\n\t\t#trim the first 10 ms off because this is very often the trailing end of a cry\n\t\tmargin = int(.01*fs)\n\t\tif len(wav) > margin:\n\t\t\twav = wav[margin:]\n\n\t\t#print the clip name\n\t\tprint(clip)\n\n\t\t#get its duration in seconds\n\t\tdur = len(wav)/fs\n\t\ttotal_samples = int(dur*fs)\n\t\tprint('\t\tclip duration (s)', dur)\n\t\t\n\t\t#make a spectrogram and show it\n\t\tt,f,spec = stft(wav, \n\t\t\t\t\t\tnoverlap=bg_labeling_params['noverlap'], \n\t\t\t\t\t\tnperseg=bg_labeling_params['nperseg'], fs=fs)\n\t\tspec = np.log(np.abs(spec))\n\t\tplt.figure(figsize=[5,5])\n\t\tplt.imshow(spec, origin='lower')\n\t\tplt.show()\n\n\t\t#get input\n\t\tval = input(\"Are there vocalizations in this image?\\nq for yes | a for no | u for undo | s to skip to the next clip | x to clip | e to save and exit\")\n\n\t\tif val=='q':\n\t\t\tprint(\"\t\tok - ignoring\")\n\t\t\tdone_source_files.append(clip.split('/')[-1])\n\t\t\thuman_labels.append('voc')\n\t\t\tdurations.append(0)\n\t\t\tnewstarts.append(0)\n\t\t\tnewstops.append(0)\n\t\t\tcontinue\n\t\n\t\telif val=='a':\n\t\t\tprint(\"\t\tok - recording\")\n\t\t\tdone_source_files.append(clip.split('/')[-1])\n\t\t\thuman_labels.append('no_voc')\n\t\t\tdurations.append(dur)\n\t\t\tnewstarts.append(margin)\n\t\t\tnewstops.append(total_samples)\n\t\t\t\n\t\t\tcontinue\n\n\t\telif val=='s':\n\t\t\tprint('\t\tSKIPPING to the next CLIP...')\n\t\t\tdone_source_files.append(clip.split('/')[-1])\n\t\t\thuman_labels.append('NOT_ANNOTATED')\n\t\t\tdurations.append(0)\n\t\t\tnewstarts.append(0)\n\t\t\tnewstops.append(0)\n\t\t\tcontinue\n\t\t\t\n\n\t\telif val == 'x':\n\t\t\t\t\tstart_change = input(\"how many seconds into this clip do you want it to start?\")\n\t\t\t\t\tstop_change = input(\"how many seconds before the end of this clip do you want it to end?\")\n\t\t\t\t\t\n\t\t\t\t\ttotal_samples = int(dur*fs)\n\t\t\t\t\tnew_start = margin+int(float(start_change)*fs)\n\t\t\t\t\tnew_stop = total_samples - int(float(stop_change)*fs)\n\t\t\t\t\tshort_clip = wav[new_start:new_stop] #get the clip\n\t\t\t\t\tnew_dur = len(short_clip)/fs\n\t\t\t\t\tprint('\t\tnew duration is', new_dur)\n\t\t\t\t\t\n\t\t\t\t\tt,f,spec = stft(short_clip, \n\t\t\t\t\t\t\t\t\tnoverlap=bg_labeling_params['noverlap'], \n\t\t\t\t\t\t\t\t\tnperseg=bg_labeling_params['nperseg'], fs=fs)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tspec = np.log(np.abs(spec))\n\t\t\t\t\tplt.figure(figsize=[5,5])\n\t\t\t\t\tplt.imshow(spec, origin='lower')\n\t\t\t\t\tplt.show()\n\t\t\t\t\t\n\t\t\t\t\tnew_val = input(\"Are there vocalizations in this image? 
q for yes | a for no | s for skip\")\n\t\t\t\t\t\n\t\t\t\t\tif new_val=='a':\n\t\t\t\t\t\tprint(\"\t\tok - recording\")\n\t\t\t\t\t\tdone_source_files.append(clip.split('/')[-1])\n\t\t\t\t\t\thuman_labels.append('no_voc')\n\t\t\t\t\t\tdurations.append(new_dur)\n\t\t\t\t\t\tnewstarts.append(new_start)\n\t\t\t\t\t\tnewstops.append(new_stop)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t\t\t\telif new_val=='q':\n\t\t\t\t\t\tprint(\"\t\tok - ignoring\")\n\t\t\t\t\t\tdone_source_files.append(clip.split('/')[-1])\n\t\t\t\t\t\thuman_labels.append('voc')\n\t\t\t\t\t\tdurations.append(0)\n\t\t\t\t\t\tnewstarts.append(0)\n\t\t\t\t\t\tnewstops.append(0)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t\t\t\telif new_val=='s':\n\t\t\t\t\t\tprint('\t\tSKIPPING to the next CLIP...')\n\t\t\t\t\t\tdone_source_files.append(clip.split('/')[-1])\n\t\t\t\t\t\thuman_labels.append('NOT_ANNOTATED')\n\t\t\t\t\t\tdurations.append(0)\n\t\t\t\t\t\tnewstarts.append(0)\n\t\t\t\t\t\tnewstops.append(0)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"You pressed a key that doesn't make sense - exiting. Current clip not counted as done.\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t\telif val == 'u':\n\t\t\tcount_val = input(\"how many annotations back to you you want to go?\")\n\t\n\t\t\tif count_val in [str(i) for i in range(1,100,1)]:\n\t\t\t\tcount_val=int(count_val)\n\t\t\t\tdone_source_files = done_source_files[:-count_val]\n\t\t\t\thuman_labels = human_labels[:-count_val]\n\t\t\t\tdurations = durations[:-count_val]\n\t\t\t\tnewstarts = newstarts[:-count_val]\n\t\t\t\tnewstops = newstops[:-count_val]\n\t\t\n\t\t\t\tnp.save(file=source_file_save_path, arr=np.array(done_source_files))\n\t\t\t\tnp.save(file=human_save_path, arr=np.array(human_labels))\n\t\t\t\tnp.save(file=durations_save_path, arr=np.array(durations))\n\t\t\t\tnp.save(file=newstarts_save_path, arr=np.array(newstarts))\n\t\t\t\tnp.save(file=newstops_save_path, arr=np.array(newstops))\n\t\t\t\n\t\t\t\tprint('Removed the last', str(count_val), 'annotations and saved the list of completed source files.')\n\t\t\t\treturn \n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprint('Type a number between 1 and 100. Start over to try again.')\n\t\t\t\treturn\n\t\n\t\telif val=='e':\n\t\t\tprint(\"exiting...\")\n\t\t\tprint('you annotated', len(done_source_files),'clips amounting to', sum(durations), 'seconds of background audio...')\n\t\t\tprint('returning the labeled vocalizations to you...')\n\t\t\t#compile the data you just generated\n\t\t\ttemp = pd.DataFrame()\n\t\t\ttemp['source_file'] = done_source_files\n\t\t\ttemp['vocalizations_present?'] = human_labels\n\t\t\ttemp['duration_s'] = durations\n\t\t\ttemp['start_frame'] = newstarts\n\t\t\ttemp['stop_frame'] = newstops\n\t\t\ttemp = temp.reset_index(drop=True)\n\t\n\t\t\t#save\n\t\t\ttemp.to_feather(df_save_dir+df_save_name)\n\t\t\treturn temp\n\t\n\t\telse:\n\t\t\tprint(\"You pressed a key that doesn't make sense - exiting. Current clip not counted as done.\")\n\t\t\treturn\n\t\t\t\n\t#compile the data you just generated\n\tprint('you annotated', len(done_source_files),'clips amounting to', sum(durations), 'seconds of background audio...')\n\ttemp = pd.DataFrame()\n\ttemp['source_file'] = done_source_files\n\ttemp['vocalizations_present?'] = human_labels\n\ttemp['duration_s'] = durations\n\ttemp['start_frame'] = newstarts\n\ttemp['stop_frame'] = newstops\n\ttemp = temp.reset_index(drop=True)\n\t\n\t#save\n\ttemp.to_feather(df_save_dir+df_save_name)\n\treturn temp",
"def read_labeled_background_from_file(dataset: 'TextDataset', filename: str) -> 'TextDataset':\n new_instances = {}\n for instance in dataset.instances:\n background_instance = LabeledBackgroundInstance(instance, [], [])\n new_instances[instance.index] = background_instance\n for line in codecs.open(filename, \"r\", \"utf-8\"):\n fields = line.strip().split(\"\\t\")\n index = int(fields[0])\n correct_background_indices = [int(x) for x in fields[1].split(',')]\n if index in new_instances:\n instance = new_instances[index]\n instance.label = correct_background_indices\n for sequence in fields[2:]:\n instance.background.append(sequence)\n return TextDataset(list(new_instances.values()))",
"def process_test():\n\n test_entry = unpickle(test_file)\n test_dataset = test_entry[b'data']\n test_targets = test_entry[b'fine_labels']\n test_dataset = np.vstack(test_dataset).reshape(-1, 3, 32, 32)\n test_dataset = test_dataset.transpose((0, 2, 3, 1)) \n\n root_path = data_dir + '/cifar100/test/'\n for counter, item in enumerate(test_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = test_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"test_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)",
"def set_background(image_path):\n backgroundCommand = \"feh -q --bg-fill ~/Pictures/\" + image_path\n subprocess.run(backgroundCommand, shell=True)",
"def useBackgroundFileIfPresentInSubdir(self, imagesubdir):\n bdir = self.get_basefdir() + \"/\" + imagesubdir\n # set it (whether exists or not)\n self.backgroundDirectory = bdir\n print \"Using background image in %s\" % bdir",
"def add_background(animation, resolution, fps_modes, folder, background_keyword):\n background = ScriptedSprite('layered_sprite', (0, 0), resolution, resolution, fps_modes[0], fps_modes,\\\n sprite_folder=folder, keywords=(background_keyword,), resize_mode='fill', resize_smooth=False)\n background.add_movement((0, 0), (0, 0), 1)\n animation.add_sprite(background, 0, 1, 0)",
"def get_background_clips(raw_wavs_dir, save_location, all_segments_df, start_column, stop_column, margin = 0, label_column = None, species = None, units = 's'):\n\n\t#get the data and optionally subset by species \n\tdf = pd.read_csv(all_segments_df)\n\tif species != None:\n\t\tdf = df.loc[df['species'] == species]\n\n\t#get the names of the recording source files \n\tsource_files = df['source_file'].unique()\n\n\t#check which files have already been segmented\n\talready_processed = [i.split('_backgroundclip')[0] for i in os.listdir(save_location)]\n\t\n\tfor file in source_files:\n\n\t\t#get the start and stop times for this recording's vocalization segmentation\n\t\tsf_df = df.loc[df['source_file'] == file]\n\t\t\n\t\t#make a dataframe for the start and stop times of the intervocalization periods\n\t\tbg_df = pd.DataFrame()\n\t\tbg_df['background_start_time'] = [float(i) for i in sf_df[stop_column][:-1]] #ignore background before first vocalization \n\t\tbg_df['background_stop_time'] = [float(i) for i in sf_df[start_column][1:]] #and after last vocalization\n\t\tbg_df['source_file'] = [i for i in sf_df['source_file'][1:] ] #so background segments df is one row shorter than vocalization segments df\n\t\tbg_df['species'] = [i for i in sf_df['species'][1:] ]\n\t\tbg_df['duration'] = bg_df['background_stop_time'] - bg_df['background_start_time']\n\t\t\n\t\tnum_to_process = len(bg_df)\n\t\tnum_already_processed = len([i for i in already_processed if file in i])\n\t\t\n\t\tif file in already_processed and num_to_process==num_already_processed:\n\t\t\tprint(file, 'already processed, skipping...')\n\n\t\telse:\n\t\t\tpath_to_source = raw_wavs_dir + file \n\t\t\tfs, wav = wavfile.read(path_to_source)\n\t\t\tbg_df['background_clip_number'] = range(num_to_process)\n\n\t\t\tcount = 0\n\t\t\tprint('preparing to get', len(bg_df), 'non-vocalization clips from', file.split('/')[-1])\n\t\t\tfor idx, _ in bg_df.iterrows(): \n\t\t\t\n\t\t\t\t#get the start and stop time\n\t\t\t\tstart, end = bg_df.loc[idx, ('background_start_time')], bg_df.loc[idx, ('background_stop_time')] #get the start and stop time for the clip\n\t\t\t\t\n\t\t\t\t#name the clip\n\t\t\t\tclip_name = bg_df.loc[idx, 'source_file'].split('.wav')[0] + '_' + 'backgroundclip' + '_' + str(bg_df.loc[idx, 'background_clip_number']) + '.wav' #name the clip \n\t\n\t\t\t\t#clip it out and write it\n\t\t\t\tif units == 's':\n\t\t\t\t\tstart= int((start - margin)*fs)\n\t\t\t\t\tend = int((end + margin)*fs)\n\t\t\t\t\tclip = wav[start:end] #get the clip\n\t\t\t\t\twavfile.write(save_location + clip_name, fs, clip) #write the clip to a wav\n\t\t\t\t\tcount+=1\n\n\t\t\t\telif units == 'ms':\n\t\t\t\t\tstart, end = start - margin, end + margin\n\t\t\t\t\tstart, end = int((start/1000)*fs), int((end/1000)*fs) #convert to sampling units\n\t\t\t\t\tclip = wav[start:end] #get the clip\n\t\t\t\t\twavfile.write(save_location + clip_name, fs, clip) #write the clip to a wav\n\t\t\t\t\tcount+=1\n\t\n\t\t\tprint(' ...got', num_to_process,'wav clips')\n\tprint('done.')",
"def add_job_background(self, background_image, **kwargs):\n self.render_background(background_image, **kwargs)",
"def write_data_to_file(container, filepath, ind, print_info=False):\n\n # convert filepath in readable form so when file writting will be logged\n # path will be properly readable even on windows. Without mixed back and forward slashes.\n filepath = _path_utils.readable_norm(filepath)\n\n result = _pix_writer.write_data(container, filepath, ind, print_info=print_info)\n if result != {'FINISHED'}:\n lprint(\"E Unable to export data into file:\\n\\t %r\\n\\t For details check printouts above.\", (filepath,))\n return False\n else:\n lprint(\"I File created!\")\n return True",
"def add_image(self, path: str) -> background.Background:\n if self.background is None:\n self.add_background(path)\n image = self.background.image\n self.window._display_update()\n self.window.window_surface.blit(image, self.rect)\n else:\n image = self.background.add_image(path)\n\n return image",
"def add_background_music(self, path: str) -> CodeScene:\n self.music = BackgroundMusic(path)\n return self",
"def run_background_fit(input_filename: str, user_arguments: FitArguments) -> FitReturnValues:\n # Grab the input data.\n input_data = setup_data(input_filename)\n return_values = run_fit(\n fit_object = three_orientations.BackgroundFit,\n input_data = input_data, user_arguments = user_arguments\n )\n return return_values",
"def create_training_example(background, actrivates, negatives, Ty):\n\n background = background - 20 ## make background quieter\n y = np.zeros((1, Ty))\n previous_segments = []\n number_of_activates = np.random.randint(0, 5)\n random_indices = np.random.randint(len(activates), size=number_of_activates)\n random_activates = [activates[i] for i in random_indices] ## select a random number of random 'activate' in |activates| and insert into background\n\n for random_activate in random_activates:\n background, segment_time = insert_audio_clip(background, random_activate, previous_segments)\n segment_start, segment_end = segment_time\n y = insert_ones(y, segment_end) \n\n number_of_negatives = np.random.randint(0, 3)\n random_indices = np.random.randint(len(negatives), size=number_of_negatives)\n random_negatives = [negatives[i] for i in random_indices]\n\n for random_negative in random_negatives:\n background, _ = insert_audio_clip(background, random_negative, previous_segments)\n\n background = match_target_amplitude(background, -20.0) \n ## ⬆⬆⬆ have already decreased the background volume, now restore the inserted background volume.\n file_handle = background.export(\"train\" + \".wav\", format='wav')\n x = graph_spectrogram(\"train.wav\")\n return x, y\n ## ⬆⬆⬆⬆⬆⬆ list比较特殊,在函数内虽然没有返回,但是append了新的元素仍会保留在函数结束之后"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Cut all the datapoints of the given datacontainer with the given conditions. The conditions have to be a list of dicts; each dict has to have an "index" key which holds the key to check, and a "min" and/or a "max" bound.
|
def cutDataPointRows(self, datacontainer, conditions):
if isinstance(datacontainer, DataHandling.DataContainer.DataContainer):
datacontainer = copy.deepcopy(datacontainer)
datacontainer.addAttribute("Datapoints edited")
for datapoint in datacontainer.datapoints:
datapoint.cutRows(conditions)
datapoint.execFit()
datapoint.parent = datacontainer
return datacontainer
else:
return None
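
As a rough illustration of the condition format described in the query, here is a minimal standalone sketch. The DataContainer and DataPoint classes are not shown in this record, so plain dict rows and a hypothetical cut_rows helper stand in for datapoint.cutRows:

def cut_rows(rows, conditions):
    # Keep only the rows that satisfy every condition: each condition dict
    # names the key to check under "index" and an optional "min"/"max" bound.
    kept = []
    for row in rows:
        keep = True
        for cond in conditions:
            value = row[cond["index"]]
            if "min" in cond and value < cond["min"]:
                keep = False
            if "max" in cond and value > cond["max"]:
                keep = False
        if keep:
            kept.append(row)
    return kept

rows = [{"temperature": 18.0}, {"temperature": 25.5}, {"temperature": 31.0}]
conditions = [{"index": "temperature", "min": 20, "max": 30}]
print(cut_rows(rows, conditions))  # -> [{'temperature': 25.5}]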
|
[
"def apply_cuts(self, data, cuts):\n cut_mask = np.array([True], dtype=np.bool)\n for cut_key, [cut_low, cut_high] in cuts.items():\n if \"{reco}\" in cut_key:\n cut_key = cut_key.replace(\"{reco}\", self.reco)\n\n if cut_low is not None:\n cut_mask = cut_mask & (data[cut_key] > cut_low)\n if cut_high is not None:\n cut_mask = cut_mask & (data[cut_key] < cut_high)\n\n for key, val in data.items():\n data[key] = val[cut_mask]\n\n return data",
"def cut_subset(data, nb_cut = 700):\n res = []\n for label in data:\n #print('r')\n buffer =[]\n for subset in label:\n sbs = subset[700:-700] \n buffer.append(sbs)\n res.append(buffer)\n return res",
"def cut(x, y, threshold, keep=\"before\"):\n\n import numpy as np\n\n if np.shape(x) != np.shape(y):\n raise ValueError\n print(\"x and y must have same length.\")\n if np.asarray(x).ndim != 1:\n raise ValueError\n print(\"x and y must have dimension = 1.\")\n\n if [i for i in sorted(x)] == [i for i in x]:\n if threshold < x[0]:\n raise ValueError\n print(\"Your threshold is to low. Not cutting list.\")\n if threshold > x[-1]:\n raise ValueError\n print(\"Your threshold is to high. Not cutting list.\")\n for i, item in enumerate(x):\n if item > threshold:\n if keep == \"before\":\n return x[:i], y[:i]\n elif keep == \"after\":\n return x[i:], y[i:]\n elif [i for i in sorted(x, reverse=True)] == [i for i in x]:\n if threshold > x[0]:\n raise ValueError\n print(\"Your threshold is to high. Not cutting list.\")\n if threshold < x[-1]:\n raise ValueError\n print(\"Your threshold is to low. Not cutting list.\")\n for i, item in enumerate(x):\n if item < threshold:\n if keep == \"before\":\n return x[:i], y[:i]\n elif keep == \"after\":\n return x[i:], y[i:]\n else:\n raise ValueError(\n \"Your series x is not sorted. Sort it either ascending or descending.\"\n )",
"def perform_cut(dataset, cut_function):\n data = dataset[:]\n logger.info('Performing cut with function %s', cut_function.__name__)\n cut = cut_function(data)\n logger.info('%d events were cut', np.where(~cut)[0].shape[0])\n return cut",
"def apply_cutoffs(df, cutoffs_dict):\n df_new = df.copy()\n if type(cutoffs_dict) != type(dict()):\n raise ValueError()\n \n for key, value in cutoffs_dict.items():\n df_new = df_new[df_new[key] >= value]\n \n return df_new",
"def filter_data(self, data, conditions):\n for condition in conditions:\n ips_to_remove = set()\n key = condition['key']\n operation = condition['operation']\n value = condition['value']\n \n for ip, fields in data.iteritems():\n # If the field to filter on doesn't exist, remove this ip in the results\n if condition['key'] in fields:\n # If the current ip's fields don't satisfy the condition, remove it from results\n if operation == '=' and not (fields[key] in value):\n ips_to_remove.add(ip)\n elif operation == '>' and not (fields[key] > value):\n ips_to_remove.add(ip)\n elif operation == '<' and not (fields[key] < value):\n ips_to_remove.add(ip)\n else:\n ips_to_remove.add(ip)\n\n # Remove ips after each condition is processed so we don't check them on the next condition\n for ip in ips_to_remove:\n del data[ip]\n \n return data",
"def safe_reslice(original_boolean,original_probability,condition,\n min_points_between,get_best_slice_func):\n new_boolean = original_boolean.copy()\n new_probability = original_probability.copy()\n where_condition = np.where(condition)[0]\n if (where_condition.size > 0):\n new_boolean[where_condition] = 0 \n new_probability[where_condition] = 1\n # get the original and new slices\n mask_original = np.where(original_boolean)[0]\n mask_new = np.where(new_boolean)[0]\n if (mask_new.size == 0 or mask_original.size == 0):\n return new_boolean,new_probability\n # POST: have something to do\n original_events = _event_slices_from_mask(mask_original,\n min_points_between)\n new_events = _event_slices_from_mask(mask_new,min_points_between)\n remove,keep = [],[]\n for e in original_events:\n start,stop = e.start,e.stop\n # determine which new events are within the old events\n candidates = [tmp for tmp in new_events \n if (tmp.start >= start) and (tmp.stop <= stop)\n and (tmp.start < tmp.stop)]\n if (len(candidates) == 0):\n continue\n # determine the best new event within the old event, in the subslice\n # indices\n idx_best = get_best_slice_func(candidates)\n # get the best\n keep.append(candidates[idx_best])\n # remove the events\n remove.extend(candidates[i] for i in range(len(candidates))\n if i != idx_best)\n # anything left over should also be deleted\n remove.extend(new_events)\n # POST: know what to remove and what to keep\n new_boolean = np.zeros(new_boolean.size)\n new_probability = np.ones(new_probability.size)\n for keep_idx in keep:\n new_boolean[keep_idx] = 1\n new_probability[keep_idx] = original_probability[keep_idx]\n # pick out the minimum derivative slice within each previous slice\n return new_boolean,new_probability",
"def cut_head(set_value, set_list, cut_lists):\n first_value = find_nearest_num(set_list, set_value)\n## print('head: set value = ' + str(set_value) + '; found value = ' + str(first_value))\n first_index = set_list.index(first_value)\n for i in range(len(cut_lists)):\n cut_lists[i] = cut_lists[i][first_index -1 :]\n return cut_lists",
"def split_by_dimensions(values, cmp, targets: MaxMinList):\n if values is None or len(values) == 0:\n return []\n if len(values) == 1:\n return [values]\n \n splitted = []\n while True:\n top_group = find_dimension_maxmin_set(values, cmp, targets)\n if len(top_group) > 0:\n splitted.append(top_group)\n for v in top_group:\n values.remove(v)\n continue\n if len(values) > 0:\n splitted.append(values)\n break\n return splitted",
"def slicer(self, condition):\n full_data = self._data.copy()\n series_data = full_data[full_data.columns[0]].copy()\n slicer, _ = get_logic_index(series_data, condition, full_data)\n return slicer",
"def cut_data(table, cut_columns):\n cols = []\n for i in range(len(cut_columns)):\n cols.append(cut_column(table, cut_columns[i]))\n return cols",
"def cutt(self, tlims):\n\toutinst=deepcopy(self)\n\ttidx_min=max(find(self.tof<tlims[0]))\n\ttidx_max=min(find(self.tof>tlims[1]))\n\toutinst.tof=outinst.tof[tidx_min:tidx_max]\n\toutinst.datain=outinst.datain[:,:,tidx_min:(tidx_max-1)]\n\toutinst.d=outinst.d[:,:,tidx_min:tidx_max]\n\treturn outinst",
"def cut_tail(set_value, set_list, cut_lists):\n last_value = find_nearest_num(set_list, set_value)\n## print('tail: set value = ' + str(set_value) + '; found value = ' + str(last_value))\n last_index = set_list.index(last_value)\n for i in range(len(cut_lists)):\n cut_lists[i] = cut_lists[i][:last_index + 1]\n return cut_lists",
"def threshold_data(x, threshold, n_min=1):\n assert n_min > 0, 'invalid minimum segment length [{}]'.format(n_min)\n x = np.asarray(x, dtype=np.float64)\n\n segments, num_segments = futils.threshold_data(x, threshold, n_min)\n slices = [\n slice(seg[0], seg[1])\n for seg in segments[:num_segments]\n ]\n return slices",
"def cut_images(self, cutting_function):\n # Can make this work conditionally based on which images are enabled later\n for d in range(len(self.images)):\n for s in range(len(self.images[d])):\n for f in range(len(self.images[d][s])):\n for i in range(len(self.images[d][s][f])):\n self.images[d][s][f][i].cut_image(cutting_function, (self.x(), self.y(), self.z(), d), self.paksize())",
"def cutKey(targetList, time=(), hierarchy=\"string\", float=(), includeUpperBound=bool, controlPoints=bool, clear=bool, index=int, shape=bool, selectKey=bool, attribute=\"string\", animation=\"string\", option=\"string\"):\n pass",
"def dictCut(dictionary, cut):\n for name, arr in list(dictionary.items()):\n dictionary[name] = arr[cut]",
"def categorize(data_list,element):\r\n\r\n vis_dict = {'0.0':0,'0.3':1,'0.6':2,'1':3,'2':4,'4':3,'6':6}\r\n zr_dict = {'-0.1':0,'1':1,'8':2,'18':3,'28':4,'45':5}\r\n sn_dict = {'-0.1':0,'0.05':1,'0.15':2,'0.45':3,'0.75':4,'0.95':5,'1.35':6,'1.85':7} \r\n wc_dict = {'-20':6,'-10':5,'0':4,'10':3,'20':2,'30':1,}\r\n #sn_dict = {'-0.1':0,'0.05':1,'0.15':2,'0.33':3,'0.75':4,'1.5':5,'2.5':6} \r\n\r\n if element == 'sn':\r\n data_dict = sn_dict\r\n if element == 'zr':\r\n data_dict = zr_dict \r\n if element == 'vis':\r\n data_dict = vis_dict \r\n if element == 'wc':\r\n data_dict = wc_dict\r\n\r\n\r\n category_list = [] \r\n for x in range(0,len(data_list)):\r\n val = data_list[x]\r\n for key in data_dict:\r\n if val > float(key):\r\n x_cat = data_dict[key]\r\n \r\n category_list.append(x_cat)\r\n\r\n return category_list",
"def crop_by_column(self, key, _min, _max):\n column = self.columns[key]\n start = None\n end = None\n # Find the crop indices:\n for i in range(len(column.data)):\n value = column.data[i]\n if value >= _min and start == None:\n start = i \n if value >= _max and end == None:\n end = i\n\n # Default case:\n if start == None:\n start = 0\n if end == None:\n end = len(self.columns[key].data) - 1\n self.crop(start, end + 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Log the given message. There are (currently) three different log_type values, all defined in the View Constants: LOG_STATUSBAR prints the message in the status bar of the view, LOG_CONSOLE logs to the internal console, and LOG_DEBUG also logs to the console but is only displayed if the program runs in debug mode.
|
def log(self, message, log_type = Constants.LOG_CONSOLE):
self.view.log(message, log_type)
|
[
"def log_message(self) -> global___LogMessage:",
"def log(self, msg=\"\", level=1):\n\n if self.log_level >= level:\n print(\"[%s] %s\" % (time.strftime(\"%I:%M.%S\"), msg))",
"def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:\n global _logger\n\n if _logger is None:\n _logger = logging.getLogger(\"werkzeug\")\n\n if _logger.level == logging.NOTSET:\n _logger.setLevel(logging.INFO)\n\n if not _has_level_handler(_logger):\n _logger.addHandler(_ColorStreamHandler())\n\n getattr(_logger, type)(message.rstrip(), *args, **kwargs)",
"def log(self, msg):\n self.logger.write(msg)",
"def log(msg, level='INFO'):\n if level not in ['DEBUG', 'INFO', 'CRITICAL', 'WARNING']:\n level = 'INFO'\n loglevel = getattr(QgsMessageLog, level)\n QgsMessageLog.logMessage(msg, level=loglevel)",
"def log(self, msg):\n self.logs.append(str(msg))",
"def log_message(function=None, name=None, message_type=None):\n message = None\n if message_type in ['calling', 'starting', 'finishing']:\n message = '{} the {} {}'.format(message_type.capitalize(), function, name)\n elif message_type == 'completed':\n message = 'The {} {} completed successfully'.format(name, function)\n return message",
"def _log(self, logstring, **kwargs):\n if self.logprint:\n print(logstring, **kwargs)",
"def log(msg,level=1):\n _level = None\n try:\n _level = BuiltIn().get_variable_value('${DEBUG}')\n except:\n pass\n if _level is None: _level=1\n if int(_level) >= int(level):\n BuiltIn().log(msg)",
"def logg(msg):\n logging_log(LOGGING_LEVELS['NORMAL']['level'], msg)",
"def log(self, message, level, functionName=None):\n\n if self.logLevel >= level:\n if functionName is not None:\n self.file.write(functionName + \": \" + message + \"\\n\")\n else:\n self.file.write(message + \"\\n\")\n self.file.flush()",
"def log(self, message, category=\"misc\", data=None):\n self._dirty = True\n entry = {\"message\": message, \"timestamp\": time.time(), \"category\": category}\n if data is not None:\n entry[\"data\"] = data\n\n # write to log file\n json.appendline(entry, self.filename)\n # write to stdout\n msg = \"{INTENSE_CYAN}\" + category + \"{PURPLE}:\"\n msg += \"{INTENSE_WHITE}\" + message + \"{NO_COLOR}\"\n print_color(msg)",
"def log(text):\n if LOG:\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(f\"[{current_time}] {text}\")",
"def access_log(request, msg=None, level=None):\n if level is None:\n level = logging.INFO\n ai = AccessInfo(request)\n ai.log(level, msg)",
"def log_alert(cls, msg: str, print_on_screen: bool = True):\n\n yellow, reverse, default = cls.ANSI.get('yellow'), cls.ANSI.get('reversed'), cls.ANSI.get('default')\n msg = f\"{yellow}{reverse}{cls.__get_now('%H:%M:%S')}{default}{yellow} {msg}{default}\"\n cls.log(msg, print_on_screen)",
"def debug(self,msg):\n self.logger.debug(msg)",
"def debug_print(message):\n if debug_logging:\n print(message)",
"def _syslog(level, message):\n message = str(message)\n print message\n syslog.syslog(level, LOG_PREFIX + message)",
"def debug(msg):\n if DEBUG:\n log.debug(msg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pause or unpause the display of errors. While paused, errors are added to the internal collection and are displayed once error display is unpaused again. Use with caution! This may hide errors so the user does not know what is going on!
|
def pauseErrorDisplay(self, pause):
b = self._block_errors
self._block_errors = (pause == True)
if self._block_errors == False and b != self._block_errors:
for error in self._error_collection:
self.error(error[0], error[1], error[2])
self._error_collection = []
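
A minimal, self-contained sketch of the same pause-and-flush pattern (SimpleErrorReporter and report are hypothetical names; in the real class the buffering happens inside its error() method, and the buffered entries are replayed here when display is unpaused):

import time

class SimpleErrorReporter:
    def __init__(self):
        self._block_errors = False
        self._error_collection = []

    def pause_error_display(self, pause):
        was_blocked = self._block_errors
        self._block_errors = bool(pause)
        # When unpausing, replay everything collected while paused.
        if not self._block_errors and was_blocked:
            for message, details, _timestamp in self._error_collection:
                self.report(message, details)
            self._error_collection = []

    def report(self, message, details=None):
        if self._block_errors:
            self._error_collection.append((message, details, time.time()))
            return False
        print("ERROR:", message, details or "")
        return True

reporter = SimpleErrorReporter()
reporter.pause_error_display(True)
reporter.report("disk full", "while saving results")  # buffered, not shown yet
reporter.pause_error_display(False)                   # flushes the buffered error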
|
[
"def clear_errors(self):\n self.epicsLive.clear_errors()",
"def showErrors(self):\n self.log.error('There were {0} errors encountered while executing all operations:'.format(len(self.error_list)))\n for i, error in enumerate(self.error_list):\n self.log.error('[{0}] {1}'.format(i, error))",
"def __showError(self, out):\n self.errorGroup.show()\n self.errors.insertPlainText(out)\n self.errors.ensureCursorVisible()\n \n if not self.__hgClient:\n # show input in case the process asked for some input\n self.inputGroup.setEnabled(True)\n self.inputGroup.show()",
"def set_errors(self, errors):\n self.errors = errors\n self.fill()",
"def _advance(self):\r\n # Disable interaction if no checker\r\n if self._checker is None:\r\n self._disableButtons()\r\n self.emit('check-done')\r\n return\r\n\r\n # Advance to next error, disable if not available\r\n try:\r\n self._checker.next()\r\n except StopIteration:\r\n self._disableButtons()\r\n self.error_text.set_text(\"\")\r\n self._fillSuggestionList([])\r\n self.replace_text.set_text(\"\")\r\n return\r\n self._enableButtons()\r\n \r\n # Display error context with erroneous word in red\r\n self.error_text.set_text('')\r\n iter = self.error_text.get_iter_at_offset(0)\r\n append = self.error_text.insert_with_tags_by_name\r\n \r\n\r\n lContext = self._checker.leading_context(self._numContext)\r\n tContext = self._checker.trailing_context(self._numContext)\r\n append(iter, lContext, 'fg_black')\r\n append(iter, self._checker.word, 'fg_red')\r\n append(iter, tContext, 'fg_black')\r\n\r\n # Display suggestions in the replacements list\r\n suggs = self._checker.suggest()\r\n self._fillSuggestionList(suggs)\r\n if suggs: self.replace_text.set_text(suggs[0])\r\n else: self.replace_text.set_text(\"\")",
"def _encountered_error(self, event):\n self._log.error(vlc.libvlc_errmsg())\n self.next()",
"def pop_error(self, error):\n messagebox.showerror('Error', error)",
"def print_error(self):\n print('\\n'.join(self.error_buffer))",
"def error(self, message):\n global ERRORS\n ERRORS.append(message)",
"def Advance(self):\r\n # Disable interaction if no checker\r\n if self._checker is None:\r\n self.EnableButtons(False)\r\n return False\r\n # Advance to next error, disable if not available\r\n try:\r\n self._checker.next()\r\n except StopIteration:\r\n self.EnableButtons(False)\r\n self.error_text.SetValue(\"\")\r\n self.replace_list.Clear()\r\n self.replace_text.SetValue(\"\")\r\n if self.IsModal(): # test needed for SetSpellChecker call\r\n # auto-exit when checking complete\r\n self.EndModal(wx.ID_OK)\r\n return False\r\n self.EnableButtons()\r\n # Display error context with erroneous word in red.\r\n # Restoring default style was misbehaving under win32, so\r\n # I am forcing the rest of the text to be black.\r\n self.error_text.SetValue(\"\")\r\n self.error_text.SetDefaultStyle(wx.TextAttr(wx.BLACK))\r\n lContext = self._checker.leading_context(self._numContext)\r\n self.error_text.AppendText(lContext)\r\n self.error_text.SetDefaultStyle(wx.TextAttr(wx.RED))\r\n self.error_text.AppendText(self._checker.word)\r\n self.error_text.SetDefaultStyle(wx.TextAttr(wx.BLACK))\r\n tContext = self._checker.trailing_context(self._numContext)\r\n self.error_text.AppendText(tContext)\r\n # Display suggestions in the replacements list\r\n suggs = self._checker.suggest()\r\n self.replace_list.Set(suggs)\r\n self.replace_text.SetValue(suggs and suggs[0] or '')\r\n return True",
"def failed_to_pause(self, failed_to_pause):\n\n self._failed_to_pause = failed_to_pause",
"def flash_form_errors(form):\n for field, errors in form.errors.items():\n for error in errors:\n flash('{0}: {1}'.format(field, error))",
"def fill(self):\n self._store.clear()\n warn = \"\\u26A0\"\n if sys.version_info.major < 3:\n warn = warn.decode('unicode-escape')\n\n elements = [(err.path, j, err.msg, err.is_error)\n for j, err in enumerate(self.errors)]\n elements.sort()\n\n for (path, idx, msg, is_error) in elements:\n if not is_error:\n path = \"<span foreground='darkgrey'>%s</span>\" % path\n\n msg = \"<span foreground='%s'>%s</span> \" % \\\n (\"red\" if is_error else \"orange\", warn) + msg\n self._store.append((path, idx, msg))",
"def error_explorer(errors):\n rows = [str([str(i) + \" : \" + str(e[i]) for i in e]) for e in errors]\n choice = True\n while choice is not None:\n choice = easygui.choicebox(choices=rows)\n if choice is None:\n pass\n else:\n err = errors[rows.index(choice)]\n msg = \"\"\n for k in err:\n msg += \"\\n \" + k + \":\\n\" + str(err[k]) + \"\\n\"\n easygui.msgbox(msg)",
"def printErrors(self):\n for filename in sorted(self.texterrors):\n fileerrors = self.texterrors[filename]\n print(\n \"\\n\",\n 70 * \"=\",\n \"\\n%s, %i possible errors found.\" % (filename, len(fileerrors)),\n \"Suppressing %i error codes: %s\"\n % (len(self.ignorecodes), \",\".join(self.ignorecodes)),\n \"\\n\",\n 70 * \"=\",\n )\n # print(fileerrors)\n for e in fileerrors:\n if e.name not in self.ignorecodes:\n print(\" \", e.name, e)\n for filename in self.imgerrors:\n fileerrors = self.imgerrors[filename]\n for e in fileerrors:\n print(filename)\n print(\" \", e)",
"def _disable_show_errors(self, val):\r\n if val == Qt.Unchecked:\r\n self._showErrorsOnLine.setChecked(False)",
"def display(error):\n\tif error is not None:\n\t\tflash(\"Error: \" + error)",
"def hide_output_error(self):\n\t\tself.output_message_label['text'] = ''",
"def add_error(self, result=None, hidden=False):\n if not hidden:\n self.scores['errors'] += 1\n if result is not None:\n self.error_results.append(result)\n else:\n self.scores['hidden_errors'] += 1\n if result is not None:\n self.hidden_error_results.append(result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Raise an error message with the given error_type. The types are defined in the Constants. The error_details will be added to the error dialog; they can contain further information.
|
def error(self, message, error_type = Constants.NOTICE, error_details = None):
if self._block_errors:
self._error_collection.append((message, error_type, error_details, time.time()))
return False
error_string = "An Error"
error_details_string = ""
# parse the error type to a string for logging
if error_type == Constants.NOTICE:
error_string = "A Notice"
elif error_type == Constants.FATAL:
error_string = "A Fatal Error"
# parse the details to a string
if isinstance(error_details, str):
error_details_string = error_details
elif my_utilities.is_iterable(error_details):
for key, value in enumerate(error_details):
error_details_string += value + "\n"
self.log("<b style='color: red;'>{0} occurred: {1}{2}</b>".format(error_string, str(message),
error_details_string))
if self._block_errors != True and (error_type == Constants.FATAL or error_type == Constants.NOTICE_ERROR):
self.view.showErrorDialog(message, error_details_string, error_type)
return True
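
A hedged sketch of the error_details handling above (format_error_details is a hypothetical helper): a plain string is used as-is, while any other iterable is flattened line by line into the details string shown in the dialog:

def format_error_details(error_details):
    # Mirror the str-vs-iterable branch: strings pass through unchanged,
    # other iterables are joined one item per line.
    if isinstance(error_details, str):
        return error_details
    try:
        return "\n".join(str(item) for item in error_details)
    except TypeError:
        return ""

print(format_error_details("file not found"))
print(format_error_details(["line 12: bad value", "line 40: missing key"]))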
|
[
"def error_type(self, error_type):\n allowed_values = [\"N/A\", \"QuerySyntaxError\", \"QueryExecutionError\", \"Timeout\"] # noqa: E501\n if (self._configuration.client_side_validation and\n error_type not in allowed_values):\n raise ValueError(\n \"Invalid value for `error_type` ({0}), must be one of {1}\" # noqa: E501\n .format(error_type, allowed_values)\n )\n\n self._error_type = error_type",
"def error(self, text):\n self.message('ERROR', text, color='red')",
"def display_error(title, error_message):\n messagebox.showerror(title=title, message=error_message)",
"def addError(self, status, errType):\n newError = {\"ExitStatus\" : status,\n \"Type\" : errType,\n \"Description\": \"\"}\n self.errors.append(newError)\n return newError",
"def subroutine_error_popup(subroutine, error, message):\n # ===== Error Window Definition ===== #\n error_layout = [\n [sg.Text(an.ERROR_INTRO.format(subroutine))],\n [sg.Text(message)],\n [sg.Text(an.ERROR_OUTRO, text_color='red')],\n [sg.Text('', font='default 10 italic', key='-COPY_DONE-', size=(40, 1))],\n [sg.Button('Copy to clipboard', key='-CLIPBOARD-'),\n sg.Button('Send as e-mail', key='-EMAIL-', bind_return_key=True, focus=True),\n sg.Exit('Close', key='close')]\n ]\n error_window = sg.Window('Subroutine Error', error_layout, modal=True)\n\n # ===== Handle Button Actions ===== #\n while True:\n event_error, values_error = error_window.Read()\n if event_error in [sg.WIN_CLOSED, 'close']:\n error_window.close()\n break\n\n # Copy trace to clipboard\n elif event_error == '-CLIPBOARD-':\n sg.clipboard_set('Version: {}\\n\\n{}\\n\\n{}'.format(__version__, str(error), message))\n error_window['-COPY_DONE-']('(Error message copied to clipboard.)')\n\n # Compose bug report as e-mail\n elif event_error == '-EMAIL-':\n query = {'subject': f'Error in {subroutine}',\n 'body': 'Version:\\n{}\\n\\nMessage:\\n{}\\n\\nComment:\\n'.format(__version__, message)}\n web_open(f'mailto:{__email__}?{urlencode(query, quote_via=quote)}', new=1)",
"def ShowEnvironmentError(msg, inst):\n\n details = {'msg':msg, 'type':inst.__class__.__name__}\n\n if inst.filename:\n details['filename'] = _('Filename: %s') % inst.filename\n\n if inst.errno is not None and inst.strerror is not None:\n details['error'] = '%s - %s' % (inst.errno, inst.strerror)\n else:\n details['error'] = str(inst.args)\n\n text = _(\"\"\"%(msg)s\n\nError type: %(type)s\nError code: %(error)s\n%(filename)s\"\"\") % details\n\n wx.MessageBox(text, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)",
"def pop_error(self, error):\n messagebox.showerror('Error', error)",
"def showError(errormessage):\r\n messagebox.showerror(\"WinRAT\", errormessage)",
"def json_form_error_context(self, msg, type='internal'):\n # FIXME: check if type is valid\n self.status_code = 500\n return {'error': [[type, [msg]]]}",
"def usertype_error(l):\n simulate_loading_login_page(l)\n user_demographics = generate_user()\n user_demographics[\"usertype\"] = \"invalid_user_type\"\n register_and_expect_failure(l, user_demographics)\n l.interrupt()",
"def error_msg(self, value):\n self._error_msg = value",
"def GetRaiseTypeError(self):\n raise TypeError('RaiseTypeError Parameter')",
"def error(self, message):\n pass",
"def __show_error_dialog(self):\n dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_ERROR,\n gtk.BUTTONS_NONE, None)\n dialog.format_secondary_text(constants.MESSAGE_0012)\n dialog.set_markup(constants.MESSAGE_0011)\n dialog.add_button(gtk.STOCK_OK, 1)\n dialog.run()\n dialog.destroy()",
"def error(self, msg, raw=False):\n self.lastError = str(msg)\n self._msg(('' if raw else 'ERROR: ') + str(msg), self.ERROR, raw, self.RED)",
"def popup_error_message(caption, message):\n\n msg = wx.MessageDialog(None, message, caption, style=wx.ICON_ERROR|wx.OK)\n msg.ShowModal()\n msg.Destroy()",
"def error(msg):\n message(msg, flag='e')",
"def error(self):\n self.set_dayu_type(MAlert.ErrorType)\n return self",
"def display_error_message(title, message):\n msg = wx.MessageDialog(None, message, title, wx.OK | wx.ICON_WARNING)\n msg.ShowModal()\n msg.Destroy()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads the room data into the listView
|
def listadohab(listhabitaciones):
try:
variables.listado = listar()
listhabitaciones.clear()
for registro in variables.listado:
listhabitaciones.append(registro[0:3])
except sqlite3.OperationalError as e:
print(e)
conexion.conex.rollback()
|
[
"def datalist(self,model):\n calender = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun',7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}\n data = model.objects.filter(owner=self.request.user).values().order_by('date')\n a= 0\n d= []\n \n for i in data:\n g={}\n for j in i:\n if j=='date':\n \n d_obj = i[j]\n d_day = d_obj.day\n d_month = calender[d_obj.month]\n d_year = d_obj.year\n g['label'] =str(d_month)+' '+str(d_day)+', '+str(d_year)\n \n elif j=='weight':\n w_obj = i[j]\n g['y'] = w_obj\n \n d.append(g)\n\n \n \n return d",
"def list_por_dato_recibido(dato):\n datos = ['################ RECIBOS ###############']\n bucle = 1\n model = Model()\n recibos = []\n recibos = model.obtener_objetos(Recibo)\n \n for rec in recibos:\n if rec.tecnico == dato:\n datos.append(\"{}- Fecha: {}\".format(bucle, rec.fecha))\n datos.append(\" Codigo : {}\".format(recibos.index(rec)))\n datos.append(\" Cliente: {}\".format(rec.cliente))\n datos.append(\" Tecnico: {}\".format(rec.tecnico))\n datos.append(\" Presupuesto: {}\".format(rec.presupuesto))\n datos.append(\" Validez: {} Dias\".format(rec.validez))\n datos.append(\" Valido: {}\".format(rec.calcular_validez()))\n datos.append(\" Observacion: \")\n datos.append(\" ----- : {}\".format(rec.observacion))\n datos.append(\" Estado: {}\".format(rec.estado))\n datos.append(\"\")\n datos.append(\"\")\n bucle += 1 \n list_datos(datos)",
"def visits_from_database(self):\n self.visit_table_data = self.VisitsCards.setup(self.disp_model[\"pid\"], self.sql)\n # Upload the data to the table\n self.visit_table.setRowCount(len(self.visit_table_data))\n # seems like we need to fill each item individually\n # loop across rows (each result) and then into columns (each value)\n for row_i, row in enumerate(self.visit_table_data):\n for col_i, value in enumerate(row):\n item = QtWidgets.QTableWidgetItem(str(value))\n self.visit_table.setItem(row_i, col_i, item)",
"def viewData(self):\n keys = ('Title', 'Year', 'imdbRating', 'Runtime', 'Plot', 'Genre', 'Poster', 'Director', 'Actors', 'Awards')\n\n # Search for user selection in database and API\n try:\n movie_title = self.L.get(self.L.curselection())\n self.cur.execute(\"SELECT title, year, rating, runtime, plot, genre_id, posterLink, director, actor, award FROM Movies WHERE Title = ?\", (movie_title,))\n movie_values = self.cur.fetchone()\n\n # check if selection is in the local database\n if movie_values is not None:\n movie = dict(zip(keys, tuple(movie_values)))\n movie['Runtime'] = str(movie['Runtime'])\n\n # fetch all genres from the db\n genres = []\n for genre_id in [int(x) for x in movie['Genre'].split(',')]:\n self.cur.execute('''SELECT genre FROM GENRES WHERE id = ?''', (genre_id,))\n genres.append(self.cur.fetchone()[0])\n movie['Genre'] = ', '.join(genres)\n\n # fetch data from API if not in database\n else:\n movie = requests.get(FETCH_FROM_URL.replace('<imdb id>', self.fetched_movies[movie_title])).json()\n movie = {key: movie[key] for key in keys}\n MovieDataWin(self, movie)\n except tk.TclError:\n print(\"Nothing was selected\")",
"def list_recibos():\n datos = ['################ RECIBOS ###############']\n bucle = 1\n model = Model()\n recibos = []\n recibos = model.obtener_objetos(Recibo)\n \n for rec in recibos:\n datos.append(\"{}- Fecha: {}\".format(bucle, rec.fecha))\n datos.append(\" Codigo : {}\".format(recibos.index(rec)))\n datos.append(\" Cliente: {}\".format(rec.cliente))\n datos.append(\" Tecnico: {}\".format(rec.tecnico))\n datos.append(\" Presupuesto: {}\".format(rec.presupuesto))\n datos.append(\" Validez: {} Dias\".format(rec.validez))\n datos.append(\" Valido: {}\".format(rec.calcular_validez()))\n datos.append(\" Observacion: \")\n datos.append(\" ----- : {}\".format(rec.observacion))\n datos.append(\" Estado: {}\".format(rec.estado))\n datos.append(\"\")\n datos.append(\"\")\n bucle += 1 \n list_datos(datos)",
"def perdidos_mapa(request):\n datos_adopciones=Datos_extravio.objects.all()\n lista=[ ]\n cantidad_encontrados= len(Datos_extravio.objects.filter(encontrado=True))\n for item in datos_adopciones:\n cada_dato={\"id\":item.identificativo ,\"fecha\": str(item.fecha), \"nombre\": item.nombre_animal,\n \"sexo\": item.sexo, \"animal\": item.animal,\"descripcion\": item.descripcion,\n \"link\":item.imagen.url ,\"extraviado\":str(item.encontrado),\n \"telefono\":item.numero_telefono_persona,\"ip\":item.ip_dispositivo, \n \"ubicacion\": {\"latitud\": item.latitud_perdido,\n \"longitud\": item.longitud_perdido\n }}\n lista.append(cada_dato) # agregamos a la lista\n data = {\"data\": lista,\"cant_encontrados\":cantidad_encontrados} # al final enviamos esto \n return render(request, \"perdidos.html\", data)",
"def CargarCMBComida(CMBComidas):\n i = 0\n cur.execute(\"SELECT * FROM Servicio\")\n row = cur.fetchone()\n list = Gtk.ListStore(str)\n list.append([row[1]])\n all_rows = cur.fetchall()\n for row in all_rows:\n i = i + 1\n list.append([row[1]])\n\n for name in list:\n CMBComidas.append_text(name[0])\n\n conex.commit()",
"def list_compras(update, context):\n\n\t# Auxiliary variables\n\ttoday = datetime.today().strftime(\"%d/%m/%Y\")\n\tfilename = \"data/compras_\"+datetime.today().strftime(\"%Y-%m\")+\".csv\"\n\n\t# List the purchases and prints the total expenses\n\ttry:\t# If the file exists\n\t\t# Loads the dataframe and calculates the category-based expenses\n\t\tcompras_df = pd.read_csv(filename)\n\t\tcosts_df = compras_df.groupby([\"type\"]).sum().reset_index()\n\n\t\t# Rendes and save the dataframe visualization\n\t\trender_table(compras_df)\n\t\tplt.savefig('tmp/compras.png', transparent=True, bbox_inches='tight')\n\n\t\t# Creates the response messages\n\t\tresponse_message1 = (\"*These are your shoppings until today (\"+today+\")*\\n\\n\")\n\t\tresponse_message2 = (\"*= Total Expenses =*\\n\" +\n\t\t\t\t\t\t\t \"\\n\".join([\"*{0}*: R${1:.2f}\".format(r['type'], r['cost']) for i,r in costs_df.iterrows()]) +\n\t\t\t\t\t\t\t \"\\n\\n*Total*: R$ {0:.2f}\".format(costs_df.cost.sum()))\n\n\t\t# Sends the message to the chat\n\t\tcontext.bot.send_message(chat_id=update.effective_chat.id,\n\t\t\t\t\t\t\t\t\ttext=markdownfy(response_message1), parse_mode=telegram.ParseMode.MARKDOWN_V2)\n\n\t\tcontext.bot.sendPhoto(chat_id=update.effective_chat.id, photo=open(\"tmp/compras.png\", 'rb'))\n\n\t\tcontext.bot.send_message(chat_id=update.effective_chat.id,\n\t\t\t\t\t\t\t\t\ttext=markdownfy(response_message2), parse_mode=telegram.ParseMode.MARKDOWN_V2)\n\n\texcept FileNotFoundError: # If the file does not exist\n\t\t# Sends the message to the chat\n\t\tcontext.bot.send_message(chat_id=update.effective_chat.id, text=\"There are still no shoppings on this month :3\")",
"def show_all_tournaments(self):\n all_tournaments = tournaments.find_all()\n list_tournaments = [(\"Nom\", \"Lieu\", \"Date\", \"Règle de jeu\", \"Description\")]\n for elt in all_tournaments:\n list_tournaments.append(\n [elt.name,\n elt.location,\n elt.date,\n elt.time_rule.name,\n elt.description])\n table_instance = AsciiTable(list_tournaments, \"Liste des tournois\")\n print(table_instance.table)",
"def mostrar_resultados(self,):\n self.items_entries[0].config(state=NORMAL)\n self.borrar_entrys()\n var_spinbox_valor = int(self.resultados_spinbox.get())-1\n print(\"el valor del indice spinbox es:\"+str(var_spinbox_valor))\n for x in range(0, cant_entries):\n self.items_entries[x].insert(\n 0, self.socio.var_resultado[var_spinbox_valor][x])\n self.items_entries[x].config()\n self.items_entries[0].config(state=DISABLED)",
"def show_agenda_items():",
"def news(dt):\n res = []\n print(\"News function\")\n for data in dt:\n res.append(News(data[1],data[0],data[2],data[3]))\n return res",
"def get_ocupadas():\n\n response.content_type = \"application/json\"\n return habitaciones_ocupadas(True)",
"def __select_list(self, widget, valor):\n\n self.actualizando = True\n\n self.panel.set_sensitive(False)\n\n for child in self.videos.get_children():\n self.videos.remove(child)\n child.destroy()\n\n new_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n new_box.show_all()\n self.videos.pack_start(\n new_box,\n True, True, 0)\n\n from Globales import get_data_directory\n import shelve\n\n dict_tube = shelve.open(\n os.path.join(get_data_directory(),\n \"List.tube\"))\n\n videos = []\n for item in dict_tube[valor].keys():\n videos.append(dict_tube[valor][item])\n\n dict_tube.close()\n\n GLib.idle_add(self.__add_videos, videos)",
"def build(self):\n # Columna[Y][X] construimos el tablero\n columnas=[]\n y=len(self.renglonesHoja)\n x=len(self.indices)\n for y0 in range(0,y):\n columnas.append([])\n for x0 in range(0,x):\n columnas[y0].append(0)\n self.columnas=columnas.copy()\n print(self.columnas)",
"def load_values(self):\n self.table.clearContents()\n offers = load_redis_values()\n self.table.setRowCount(len(offers))\n for index, offer in enumerate(offers):\n self.table.setItem(index, 0, qt.QTableWidgetItem(offer.id))\n self.table.setItem(index, 1, qt.QTableWidgetItem(offer.categoryId))\n self.table.setItem(index, 2, qt.QTableWidgetItem(offer.name))\n self.table.setItem(index, 3, qt.QTableWidgetItem('{} {}'.format(offer.price, offer.currencyId)))",
"def listaFasesDeProyecto(self, proyecto):\n thead = self.estiloHoja['Heading5']\n thead.alignment=TA_CENTER\n tbody = self.estiloHoja[\"BodyText\"]\n tbody.alignment=TA_LEFT\n cabecera = [Paragraph('Nombre',thead),Paragraph('Orden',thead),Paragraph('Estado',thead),Paragraph('Tipo de Item',thead)]\n contenido = [cabecera]\n lista = MgrProyecto().fasesDeProyecto(proyecto.nombre)\n tabla = Table(contenido)\n for f in lista:\n tipoDeItem = MgrTipoDeItem().filtrarXId(f.tipoDeItemId)\n contenido.append([Paragraph(f.nombre,tbody), Paragraph(str(f.orden),tbody), Paragraph(f.estado,tbody), Paragraph(tipoDeItem.nombre,tbody)])\n tabla = Table(contenido) \n tabla.setStyle(self.tablaStyle)\n return tabla",
"def juego(): \n tableros(tablero)\n ubicar_todo()\n #print(lista_final)\n tiros()",
"def para_consulta(self, ):\n var_categoria = self.consulta_lista.curselection()\n if len(var_categoria) == 0:\n self.var_texto_consulta.set(placeholder_consulta_label[4])\n self.borrar_entrys()\n self.items_botones[2].config(state=DISABLED) # boton modificar\n self.items_botones[3].config(state=DISABLED) # boton borrar\n self.resultados_spinbox.config(state=DISABLED)\n else:\n var_sql_columna = string_busqueda_sql[var_categoria[0]]\n print(\"La categoria a buscar es: \" +\n str(var_categoria[0]) + \" y tu busqueda es: \" + self.consulta_entry.get())\n var_consulta = self.consulta_entry.get()\n try:\n self.socio.db_conectada()\n self.socio.consultar_datos(var_sql_columna, var_consulta)\n if len(self.socio.var_resultado) >= 1:\n self.var_texto_consulta.set(\n \"\\n\\nHay \"+str(len(self.socio.var_resultado))+\" coincidencias!\\n\\n\")\n self.consulta_texto.grid(\n row=7, column=3, columnspan=2, sticky=\"S\")\n for x in self.socio.var_resultado:\n print(x)\n self.activar_resultados()\n else:\n self.var_texto_consulta.set(\n placeholder_consulta_label[3]\n )\n self.consulta_texto.grid(\n row=7, column=3, columnspan=2, sticky=\"S\")\n except:\n messagebox.showerror(title=\"Consulta de Datos\",\n message=\"Error en la Consulta\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Looks up the turnos (appointments) for a given date
|
def turno_centro_fecha(self, centro_id, fecha):
return Turnos.query.filter(
and_(Turnos.dia == fecha, Turnos.centro_id == centro_id)
).all()
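
For context, an illustrative, self-contained SQLAlchemy version of the same filter. The column types and the in-memory SQLite setup are assumptions made for this sketch; the original uses the Flask-SQLAlchemy Turnos.query interface:

import datetime
from sqlalchemy import create_engine, Column, Integer, Date, and_
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Turnos(Base):
    __tablename__ = "turnos"
    id = Column(Integer, primary_key=True)
    centro_id = Column(Integer)
    dia = Column(Date)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        Turnos(centro_id=1, dia=datetime.date(2021, 5, 3)),
        Turnos(centro_id=1, dia=datetime.date(2021, 5, 4)),
        Turnos(centro_id=2, dia=datetime.date(2021, 5, 3)),
    ])
    session.commit()

    fecha = datetime.date(2021, 5, 3)
    # All turnos on the given date for centre 1.
    turnos = session.query(Turnos).filter(
        and_(Turnos.dia == fecha, Turnos.centro_id == 1)
    ).all()
    print(len(turnos))  # -> 1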
|
[
"def rangoFechas():\n anio = int(strftime(\"%Y\", gmtime()))\n mes = int(strftime(\"%m\", gmtime()))\n l = []\n for x in [0]:\n \n diff = mes - x\n if diff <= 0:\n l.append([anio - 1, 12+ diff])\n else:\n l.append([anio, diff])\n return l",
"def getFechaTurno(self, formato=0):\n if self.fechaturno!=None:\n if formato==0:\n return self.fechaturno\n elif formato==1:\n return mysql2dmy(str(self.fechaturno).split(\" \")[0])\n elif formato==2:\n return self.fechaturno.strftime(\"%H:%M\")\n elif formato==3:\n print \"Aun no implementado, devolviendo el horario comun\"\n return str(self.fechaturno).split(\" \")[1].rstrip()\n elif formato==4:\n print \"Devolviendo en formato redondeado de hora\"\n return (self.fechaotorg).strftime(\"%H\")+\":00\"\n \n else:\n return self.fechaturno",
"def _make_dates(self):\n format_date = lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ')\n split_date = lambda x: {'year': x.year, 'month': x.month, 'day': x.day, 'hour': x.hour, 'minute': x.minute, 'second': x.second}\n date_df = pd.DataFrame(list(map(lambda x: split_date(format_date(x)), self.games['added'])))\n self.games = self.games.join(date_df, how='outer')",
"async def hanukkah_festival(self, ctx: commands.Context) -> None:\n hanukkah_dates = await self.get_hanukkah_dates()\n self.hanukkah_dates_split(hanukkah_dates)\n hanukkah_start_day = int(self.hanukkah_days[0])\n hanukkah_start_month = int(self.hanukkah_months[0])\n hanukkah_start_year = int(self.hanukkah_years[0])\n hanukkah_end_day = int(self.hanukkah_days[8])\n hanukkah_end_month = int(self.hanukkah_months[8])\n hanukkah_end_year = int(self.hanukkah_years[8])\n\n hanukkah_start = datetime.date(hanukkah_start_year, hanukkah_start_month, hanukkah_start_day)\n hanukkah_end = datetime.date(hanukkah_end_year, hanukkah_end_month, hanukkah_end_day)\n today = datetime.date.today()\n # today = datetime.date(2019, 12, 24) (for testing)\n day = str(today.day)\n month = str(today.month)\n year = str(today.year)\n embed = Embed()\n embed.title = 'Hanukkah'\n embed.colour = Colours.blue\n if day in self.hanukkah_days and month in self.hanukkah_months and year in self.hanukkah_years:\n if int(day) == hanukkah_start_day:\n now = datetime.datetime.utcnow()\n now = str(now)\n hours = int(now[11:13]) + 4 # using only hours\n hanukkah_start_hour = 18\n if hours < hanukkah_start_hour:\n embed.description = (f\"Hanukkah hasnt started yet, \"\n f\"it will start in about {hanukkah_start_hour-hours} hour/s.\")\n return await ctx.send(embed=embed)\n elif hours > hanukkah_start_hour:\n embed.description = (f'It is the starting day of Hanukkah ! '\n f'Its been {hours-hanukkah_start_hour} hours hanukkah started !')\n return await ctx.send(embed=embed)\n festival_day = self.hanukkah_days.index(day)\n number_suffixes = ['st', 'nd', 'rd', 'th']\n suffix = ''\n if int(festival_day) == 1:\n suffix = number_suffixes[0]\n if int(festival_day) == 2:\n suffix = number_suffixes[1]\n if int(festival_day) == 3:\n suffix = number_suffixes[2]\n if int(festival_day) > 3:\n suffix = number_suffixes[3]\n message = ''\n for _ in range(1, festival_day + 1):\n message += ':menorah:'\n embed.description = f'It is the {festival_day}{suffix} day of Hanukkah ! \\n {message}'\n await ctx.send(embed=embed)\n else:\n if today < hanukkah_start:\n festival_starting_month = hanukkah_start.strftime('%B')\n embed.description = (f\"Hanukkah has not started yet. \"\n f\"Hanukkah will start at sundown on {hanukkah_start_day}th \"\n f\"of {festival_starting_month}.\")\n else:\n festival_end_month = hanukkah_end.strftime('%B')\n embed.description = (f\"Looks like you missed Hanukkah !\"\n f\"Hanukkah ended on {hanukkah_end_day}th of {festival_end_month}.\")\n\n await ctx.send(embed=embed)",
"def fechasSemana(hoy= date.today()):\n\t#hoy = date.today()\n\ta_viernes = hoy.weekday() - 4\n\tif hoy.weekday() < 4 :a_viernes += 7\n\t\n\tviernes = hoy - timedelta(days=a_viernes) #El viernes pasado\n\tdias = [\n\t\t\tviernes + timedelta(days=i) for i in range(0,7) \n\t\t\t]\n\treturn dias",
"def pedidosPorPeriodo(self,fechaIni,fechaFin):",
"def abnt2_fecha_chave():\n\tif press_key(ABNT2_FECHA_COLCHETE) and press_shift():\n\t\treturn True",
"def armarDic_toRealDate(df):\n\n\t#fechaInicioReal = datetime.strptime(df[\"realtime_start\"][0], '%Y-%m-%d')\n\n\tfechaInicioReal = df[\"realtime_start\"][0]\n\n\ttenemosDiferencia = False #Variable boolean para saber si ya calcule la diferencia (quiero hacerla para el primer caso que es cuando estoy en los datos que se actualizan normalmente, no los cargado al arranque)\n\tdiferenciaDias = None # Aca voy a tener la diferencia de dias entre el dato y la carga\n\t################\n\t#date_end = datetime.strptime(date_end, '%Y-%m-%d')#Cambio el tipo de dato de la fecha. De string a datetime\n\n\t\n\t############################\n\n\tdic_date = {} # Voy a guardar para cada date todos los values que tiene y su real date asociada.\n\t#El formato va a ser diccionario con key date y value arreglo de [realtime, value]. Osea me quedan\n\t#Para date todos sus values posibles\n\tfor i in range(0,len(df)): \n\t\t\n\t\t#d = datetime.strptime(df[\"date\"][i], '%Y-%m-%d') \n\t\t\n\t\td = df[\"date\"][i]\n\t\t\n\n\t\tif d in dic_date:\n\n\t\t\t#dic_date[d].append([datetime.strptime(df[\"realtime_start\"][i], '%Y-%m-%d') ,df[\"value\"][i]])\n\t\t\tdic_date[d].append([df[\"realtime_start\"][i] ,df[\"value\"][i]])\n\t\t\n\t\telif d not in dic_date:\t\n\n\t\t\tdic_date[d] = []\n\t\t\t\n\t\t\t#dic_date[d].append([datetime.strptime(df[\"realtime_start\"][i], '%Y-%m-%d') ,df[\"value\"][i]])\n\t\t\tdic_date[d].append([df[\"realtime_start\"][i] ,df[\"value\"][i]])\n\t\t\n\t\telse:\n\t\t\n\t\t\traise Exception(\"Error toRealDate.py armando diccionario de fechas. El date no esta y tampoco esta.\")\n\n\t\tif fechaInicioReal < df[\"date\"][i] and not tenemosDiferencia: # Cuando llego al caso de que son datos cargados en vivo y no los que se cargaron todos juntos al inicio calculo la diferencia\n\t\t\t\n\t\t\ttenemosDiferencia = True\n\t\t\n\t\t\tdiferenciaDias = (df[\"realtime_start\"][i] -df[\"date\"][i]).days\n\t\t\t\n\n\n\n\tfor date in dic_date: # Para los datos cargados todos juntos les armo un realtime estimado fake de diferenciaDias mas entre su date\n\t\t\n\t\tif date <\tfechaInicioReal:\n\n\t\t\tvalue = dic_date[date][0][1]\n\n\t\t\treal_estimado = date + timedelta(days=diferenciaDias)\n\t\t\t\n\t\t\tdic_date[date].append([real_estimado ,value])\t\t\n\n\treturn dic_date",
"def make_timeline_data(self,user):\n annos = json.loads(self.user_annos.get(user))\n dates = [a['updated'] for a in annos]\n dates = [parser.parse(date) for date in dates]\n dates.sort()\n dates = dates\n \n first = dates[0]\n last = dates[-1]\n \n def perdelta(start, end, delta):\n curr = start\n while curr < end:\n yield curr.strftime('%Y-%m-%d')\n curr += delta\n \n day_dict = defaultdict(int)\n for date in dates:\n day = date.strftime('%Y-%m-%d')\n day_dict[day] += 1\n \n for day in perdelta(first, last, timedelta(days=1)):\n if day_dict.has_key(day) == False:\n day_dict[day] = 0\n \n days = day_dict.keys()\n days.sort()\n counts = [day_dict[day] for day in days]\n return counts, days",
"def get_orbit_dates(dates):\n sorted_dates = sorted(dates)\n result = []\n for d in sorted_dates:\n if len(result) == 0 or d - result[-1][\"to\"] > timedelta(hours=1):\n result.append({\"from\": d, \"to\": d}) # new orbit\n else:\n result[-1][\"to\"] = d # same orbit\n\n return result",
"def time_to_text(words, time):\n\n H = time.hour\n M = time.minute\n\n # Start Text\n text = \"ES IST\"\n word_leds = [words['TEXT']['ES'], words['TEXT']['IST']]\n corner_leds = []\n minutes = 0\n\n # Space\n text += \" \"\n\n # Minutes\n if 0 <= M < 5:\n text += \"\"\n minutes = M\n elif 5 <= M < 10 or 55 <= M <= 59:\n text += \"FÜNF\"\n word_leds.append(words['MINUTES']['FUENF'])\n if M < 10:\n minutes = M - 5\n else:\n minutes = M - 55\n elif 10 <= M < 15 or 50 <= M < 55:\n text += \"ZEHN\"\n word_leds.append(words['MINUTES']['ZEHN'])\n if M < 15:\n minutes = M - 10\n else:\n minutes = M - 50\n elif 15 <= M < 20 or 45 <= M < 50:\n text += \"VIERTEL\"\n word_leds.append(words['MINUTES']['VIERTEL'])\n if M < 20:\n minutes = M - 15\n else:\n minutes = M - 45\n elif 20 <= M < 25 or 40 <= M < 45:\n text += \"ZWANZIG\"\n word_leds.append(words['MINUTES']['ZWANZIG'])\n if M < 25:\n minutes = M - 20\n else:\n minutes = M - 40\n elif 25 <= M < 30:\n text += \"FUENF VOR HALB\"\n word_leds.append(words['MINUTES']['FUENF'])\n word_leds.append(words['TEXT']['VOR'])\n word_leds.append(words['TEXT']['HALB'])\n minutes = M - 25\n elif 30 <= M < 35:\n text += \"HALB\"\n word_leds.append(words['TEXT']['HALB'])\n minutes = M - 30\n elif 35 <= M < 40:\n text += \"FUENF NACH HALB\"\n word_leds.append(words['MINUTES']['FUENF'])\n word_leds.append(words['TEXT']['NACH'])\n word_leds.append(words['TEXT']['HALB'])\n minutes = M - 35\n\n # Space\n text += \" \"\n\n # Sign\n if 5 <= M < 25:\n text += \"NACH\"\n word_leds.append(words['TEXT']['NACH'])\n elif 40 <= M <= 59:\n text += \"VOR\"\n word_leds.append(words['TEXT']['VOR'])\n\n # Space\n text += \" \"\n\n # Hours\n if M >= 25:\n H += 1\n\n if H > 12:\n H = H - 12\n\n if H == 1 and M >= 5:\n text += \"EINS\"\n word_leds.append(words['HOURS']['EINS'])\n elif H == 1 and M < 5:\n text += \"EIN\"\n word_leds.append(words['HOURS']['EIN'])\n elif H == 2:\n text += \"ZWEI\"\n word_leds.append(words['HOURS']['ZWEI'])\n elif H == 3:\n text += \"DREI\"\n word_leds.append(words['HOURS']['DREI'])\n elif H == 4:\n text += \"VIER\"\n word_leds.append(words['HOURS']['VIER'])\n elif H == 5:\n text += \"FÜNF\"\n word_leds.append(words['HOURS']['FUENF'])\n elif H == 6:\n text += \"SECHS\"\n word_leds.append(words['HOURS']['SECHS'])\n elif H == 7:\n text += \"SIEBEN\"\n word_leds.append(words['HOURS']['SIEBEN'])\n elif H == 8:\n text += \"ACHT\"\n word_leds.append(words['HOURS']['ACHT'])\n elif H == 9:\n text += \"NEUN\"\n word_leds.append(words['HOURS']['NEUN'])\n elif H == 10:\n text += \"ZEHN\"\n word_leds.append(words['HOURS']['ZEHN'])\n elif H == 11:\n text += \"ELF\"\n word_leds.append(words['HOURS']['ELF'])\n elif H == 12 or H == 0:\n text += \"ZWÖLF\"\n word_leds.append(words['HOURS']['ZWOELF'])\n\n # UHR\n if M < 5:\n # Space\n text += \" \"\n text += \"UHR\"\n word_leds.append(words['TEXT']['UHR'])\n\n # Space\n if minutes != 0:\n text += \" \"\n\n # Dots\n if minutes == 1:\n text += \"PUNKT1\"\n corner_leds.append(words['MINUTES']['PUNKT1'])\n if minutes == 2:\n text += \"PUNKT2\"\n corner_leds.append(words['MINUTES']['PUNKT2'])\n if minutes == 3:\n text += \"PUNKT3\"\n corner_leds.append(words['MINUTES']['PUNKT3'])\n if minutes == 4:\n text += \"PUNKT4\"\n corner_leds.append(words['MINUTES']['PUNKT4'])\n\n text = re.sub(' +', ' ', text)\n word_leds = [item for sublist in word_leds for item in sublist]\n corner_leds = [item for sublist in corner_leds for item in sublist]\n return text, word_leds, corner_leds",
"def TriDates(self, UneDate, DateDepart, Jours):\n UneDate = self.ConvertieDate(UneDate)\n DateFin = self.ajoutDate(DateDepart, Jours)\n if UneDate <= DateFin and UneDate >= DateDepart:\n self.DateTrie.append(UneDate)\n print(\"Ajouté !\")",
"def turno(self, Pokemon2, cadena_1_ataque, cadena_2_ataque):\n\n\t\twhile(self.barras > 0) and (Pokemon2.barras > 0):\n\t\t\t#imprime los puntos_de_salud de cada pokemon\n\t\t\tprint(f\"\\n{self.nombre}\\t\\tPS\\t{self.puntos_de_salud}\")\n\t\t\tprint(f\"{Pokemon2.nombre}\\t\\tPS\\t{Pokemon2.puntos_de_salud}\\n\")\n\n\t\t\t# POKEMON 1\n\t\t\t\n\t\t\tprint(f\"Adelante {self.nombre}!\")\n\t\t\tfor i , x in enumerate(self.movimientos):\n\t\t\t\tprint(f\"{i+1}.\", x)\n\t\t\tindex = int(input(\"Elige un movimiento: \"))\n\t\t\timprimir_con_retraso(f\"\\n{self.nombre} uso {self.movimientos[index-1]}!\")\n\t\t\ttime.sleep(1)\n\t\t\timprimir_con_retraso(cadena_1_ataque)\n\n\t\t\t# Determinar el dano\n\t\t\tPokemon2.barras -= self.ataque\n\t\t\tPokemon2.puntos_de_salud = \"\"\n\n\t\t\tfor j in range(int(Pokemon2.barras+.1*Pokemon2.defensa)):\n\t\t\t\tPokemon2.puntos_de_salud += \"=\"\n\n\t\t\ttime.sleep(1)\n\t\t\tprint(f\"\\n{self.nombre}\\t\\tPS\\t{self.puntos_de_salud}\")\n\t\t\tprint(f\"{Pokemon2.nombre}\\t\\tPS\\t{Pokemon2.puntos_de_salud}\\n\")\n\t\t\ttime.sleep(.5)\n\n\t\t\t# verificar si Pokemon se debilito\n\n\t\t\tif Pokemon2.barras <= 0:\n\t\t\t\timprimir_con_retraso(\"\\n...\" + Pokemon2.nombre + \" se debilito.\")\n\t\t\t\tbreak\n\n\n\t\t\t# POKEMONS 2\n\t\t\tprint(f\"Adelante {Pokemon2.nombre}!\")\n\t\t\tfor i , x in enumerate(Pokemon2.movimientos):\n\t\t\t\tprint(f\"{i+1}.\", x)\n\t\t\tindex = int(input(\"Elige un movimiento: \"))\n\t\t\timprimir_con_retraso(f\"\\n{Pokemon2.nombre} uso {Pokemon2.movimientos[index-1]}!\")\n\t\t\ttime.sleep(1)\n\t\t\timprimir_con_retraso(cadena_2_ataque)\n\n\t\t\t#determinar el dano\n\t\t\tself.barras -= Pokemon2.ataque\n\t\t\tself.puntos_de_salud = \"\"\n\n\t\t\t#agregar barras adicionales mas defensa boost\n\t\t\tfor j in range(int(self.barras+.1*self.defensa)):\n\t\t\t\tself.puntos_de_salud += \"=\"\n\n\t\t\ttime.sleep(1)\n\t\t\tprint(f\"\\n{self.nombre}\\t\\tPS\\t{self.puntos_de_salud}\")\n\t\t\tprint(f\"{Pokemon2.nombre}\\t\\tPS\\t{Pokemon2.puntos_de_salud}\\n\")\n\t\t\ttime.sleep(.5)\n\n\t\t\t# verificar si Pokemon2 se debilito\n\n\t\t\tif self.barras <= 0:\n\t\t\t\timprimir_con_retraso(\"\\n...\" + self.nombre + \" se debilito.\")\n\t\t\t\tbreak",
"def get_clases_dia(fecha,aula=None,profesor=None): \n if aula:\n print \"Vamos a listar las de aula %s\"%aula\n ret = Clase.objects.filter(hora_inicio__gte=fecha,hora_fin__lte=fecha+datetime.timedelta(days=1),aula=aula)\n elif profesor:\n print \"Vamos a listar las de prfesort %s\"%profesor\n ret = Clase.objects.filter(hora_inicio__gte=fecha,hora_fin__lte=fecha+datetime.timedelta(days=1),profesor=profesor)\n else:\n ret = Clase.objects.filter(hora_inicio__gte=fecha,hora_fin__lte=fecha+datetime.timedelta(days=1))\n print \"Encontradas %s clases el dia %s\"%(ret.count(),fecha)\n return ret",
"def ticketing(self, log_date, id_day_tracking, id_fase, log):\n # Ricavo la somma delle ore gia' lavorate durante una giornata da un dipendente escludendo eventualmente gli\n # id_day_tracking associati all'id_fase dati in input\n cursor = self.connection.cursor()\n result = cursor.execute(\"\"\"\n SELECT SUM(ore_lavoro)\n FROM ticket\n WHERE id_day_tracking = {0}\n AND NOT id_fase = {1}\n \"\"\".format(id_day_tracking, id_fase))\n sum_ore_lavoro = -1\n for row in result.fetchall():\n sum_ore_lavoro = row[0]\n if sum_ore_lavoro == -1 or not sum_ore_lavoro:\n sum_ore_lavoro = 0\n self.output_logger('Sommma ore lavoro: {0}'.format(sum_ore_lavoro))\n\n # Cerco se esiste gia' un ticket per il giorno lavorativo associato alla fase in valutazione per ricavarne l'id\n # e le ore precedentemente inserite.\n result = cursor.execute(\"\"\"\n SELECT id_ticket, ore_lavoro, ore_straordinari\n FROM ticket\n WHERE id_day_tracking = {0}\n AND id_fase = {1}\n \"\"\".format(id_day_tracking, id_fase))\n ticket_found = False\n for row in result.fetchall():\n # Ticket trovato.\n ticket_found = True\n id_ticket = row[0]\n old_ore_lavoro = row[1]\n old_ore_straordinari = row[2]\n self.output_logger('Id ticket {0} contiene {1} ore di lavoro e {2} di straordinari'.format(id_ticket, old_ore_lavoro, old_ore_straordinari))\n\n # Rimuovo la fase trovata dalla lista di quelli del dipendente se e' presente\n if id_fase in self.fasi_recuperate:\n self.fasi_recuperate.remove(id_fase)\n\n if old_ore_lavoro + old_ore_straordinari != log['log_time'] or sum_ore_lavoro + old_ore_lavoro != 8:\n # Se le ore non combaciano oppure sono state eseguite modifiche su altri ticket tali per cui le ore di\n # lavoro sforano le 8 ore, si esegue un update.\n if self.is_saturday(log_date):\n ore_lavoro = 0\n ore_straordinari = log['log_time']\n else:\n ore_lavoro, ore_straordinari = self.ricalcola_ore(log['log_time'], sum_ore_lavoro)\n\n try:\n cursor.execute(\"\"\"\n UPDATE ticket\n SET ore_lavoro = {0}, ore_straordinari = {1}\n WHERE id_ticket = {2}\n \"\"\".format(ore_lavoro, ore_straordinari, id_ticket))\n self.connection.commit()\n except cx_Oracle.DatabaseError as e:\n cursor.close()\n error_msg = '\\tUpdate del id_ticket {0} con id_day_tracking {1} e id_fase {2} per il giorno {3} non riuscita: {4}'.format(id_ticket, id_day_tracking, id_fase, log_date, e)\n self.report_error(error_msg)\n return\n self.output_logger('Ticket aggiornato a {0} ore di lavoro e {1} di straordinari'.format(ore_lavoro, ore_straordinari))\n\n if not ticket_found:\n # Ticket non trovato, eseguo un'insert.\n if self.is_saturday(log_date):\n ore_lavoro = 0\n ore_straordinari = log['log_time']\n else:\n ore_lavoro, ore_straordinari = self.ricalcola_ore(log['log_time'], sum_ore_lavoro)\n\n next_val = self.get_id('SELECT seq_ticket.nextval from dual')\n try:\n cursor.execute(\"\"\"\n INSERT INTO ticket (id_stato_validazione, id_ticket, id_day_tracking, id_fase, ore_lavoro, ore_straordinari)\n VALUES (1, {0}, {1}, {2}, {3}, {4})\n \"\"\".format(next_val, id_day_tracking, id_fase, ore_lavoro, ore_straordinari))\n self.connection.commit()\n except cx_Oracle.DatabaseError as e:\n cursor.close()\n error_msg = '\\tCreazione del ticket con id_day_tracking {0} e id_fase {1} per il giorno {2} non riuscita.\\n\\t\\t\\tOre da loggare: {3}\\n\\t\\t\\tStraordinari da loggare: {4}\\n\\t\\t\\t{5}'.format(id_day_tracking, id_fase, log_date, ore_lavoro, ore_straordinari, e)\n self.report_error(error_msg)\n return\n self.output_logger('*** CREATO Id ticket {0} con {1} ore di lavoro 
e {2} di straordinari'.format(next_val, ore_lavoro, ore_straordinari))\n cursor.close()",
"def get_games_today():\n td = datetime.datetime.today()\n yd = datetime.datetime.today() - datetime.timedelta(days=1)\n\n games_yesterday = mlbgame.day(year=yd.year, month=yd.month, day=yd.day)\n games_today = mlbgame.day(year=td.year, month=td.month, day=td.day)\n \n games = games_today + games_yesterday\n score_boards = []\n for g in games:\n sb = ScoreBoard(g)\n score_boards.append(sb)\n \n return score_boards",
"def escribir_bitacora(bitacora):\n print('Escribiendo bitácora')\n with open(os.path.join('bitacoras', fecha+'.log'), 'w') as f:\n for info in bitacora:\n f.write('{:45} | {}\\n'.format(info[0], info[1]))",
"def display_tournament_date_view(self, list_t):\n\n self.commands.display_message(\n \"CHESS TOURNAMENT \\n\\n \"\n \"\\t Tournaments displayed by date of beginning: \")\n\n for tournament in list_t:\n self.commands.display_message(\n f\"{tournament['beginning_date']} to \"\n f\"{tournament['ending_date']}, type of game: \"\n f\"Name: {tournament['name']} take place in: \"\n f\"{tournament['location']}, start on: \"\n f\"{tournament['type_of_game']}, status: \"\n f\"{tournament['status']}. \")\n self.commands.display_message(\"\")",
"def __toDay(self):\n log(\"MState __toDay\",4)\n \n self.timerOn = False\n for player in self.players:\n player.timerOn = False\n \n self.time = \"Day\"\n self.day = self.day + 1\n self.mainComm.cast(\"Uncertainty dawns, as does this day\")\n\n # First, check stripper blocks\n self.blocked_ids = []\n for stripper in [p for p in self.players if p.role == \"STRIPPER\"]:\n if not stripper.target == None and not stripper.target.id in self.blocked_ids:\n self.blocked_ids.append(stripper.target.id)\n\n # If mafia has a target\n if not self.mafia_target in [None, self.null]:\n # Doctor is alive and saved the target\n target_saved = False\n for p in self.players:\n if (p.role == \"DOCTOR\") and (not p.target == None) and (p.target.id == self.mafia_target.id):\n if p.id in self.blocked_ids:\n self.mainComm.send(\"You were distracted\",p.id)\n else:\n target_saved = True\n if \"DOC\" in self.pref.book[\"know_if_saved\"]:\n self.mainComm.send(\"Your save was successful!\",p.id)\n if \"SELF\" in self.pref.book[\"know_if_saved\"]:\n self.mainComm.send(\"You were saved!\", p.target.id)\n if target_saved:\n if self.pref.book[\"know_if_saved\"] == \"ON\":\n msg = (\"Tragedy has struck! {} is ... wait! They've been saved by \"\n \"the doctor! Someone must pay for this! Vote to kill \"\n \"somebody!\").format(self.mainComm.getName(self.mafia_target.id))\n else:\n msg = (\"A peculiar feeling drifts about... everyone is still alive...\")\n self.mainComm.cast(msg)\n self.record(\"SAVED\")\n # Doctor couldn't save the target\n else:\n if self.day == 0:\n return True\n try:\n msg = (\"Tragedy has struck! {} is dead! Someone must pay for this! \"\n \"Vote to kill somebody!\").format(self.mainComm.getName(self.mafia_target.id))\n self.mainComm.cast(msg)\n self.__kill(self.mafia_target)\n except Exception:\n pass\n # Mafia has no target\n else:\n msg = (\"A peculiar feeling drifts about... everyone is still alive...\")\n self.mainComm.cast(msg)\n\t\t\t\n # If milky is still alive and has given milk\n for p in self.players:\n if p.role == \"MILKY\" and (not p.target in [None, self.null]) and p.target in self.players:\n if p.id in self.blocked_ids:\n self.mainComm.send(\"You were distracted\", p.id)\n else:\n self.giveNewMilk(p, p.target)\n\n # If cop is still alive and has chosen a target\n for p in self.players:\n if p.role == \"COP\" and (not p.target in [None, self.null]):\n if p.id in self.blocked_ids:\n self.mainComm.send(\"You were distracted\", p.id)\n else:\n name = self.mainComm.getName(p.target.id)\n if (p.target.role in MAFIA_ROLES and not p.target.role == \"GODFATHER\") or p.target.role == \"MILLER\":\n team = \"MAFIA\"\n else:\n team = \"NOT MAFIA\"\n msg = \"{} is {}\".format(name, team)\n self.mainComm.send(msg,p.id)\n self.record(' '.join([\"INVESTIGATE\",p.id,p.role,str(p.target)]))\n\n self.record(\"DAY \" + str(self.day))\n self.__clearTargets()\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
looks up the block id by its start time (hora_inicio)
|
def find_by_hora_inicio(self, hora):
bloque = Bloque.query.filter_by(hora_inicio=hora).first()
return bloque
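# Usage sketch: a minimal caller, assuming this method lives on a repository-style class
# wrapping the Flask-SQLAlchemy Bloque model; "repo" and the sample time are hypothetical
# names, not taken from the source.
from datetime import time

bloque = repo.find_by_hora_inicio(time(9, 0))  # first Bloque with that start time, or None
if bloque is not None:
    print(bloque.id, bloque.hora_inicio)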
|
[
"def siguiente(self,id):\n consulta = \"select * from socios m \" \\\n \"where m.idsocio = (select min(idsocio) from socios s \" \\\n \"where s.idsocio > %s);\"\n try:\n datos = AccesoDatos()\n cur = datos.conectar()\n cur.execute(consulta,(id))\n d = cur.fetchone()\n socio = Socio(d[1],d[2],d[3],d[0])\n datos.desconectar()\n except OperationalError as e:\n raise Exception(\"ERROR FATAL\")\n except Exception as a:\n raise Exception(\"Error al conectar a la base de datos\")\n print(\"ID : \", socio.id, \"\\nNombre: \", socio.nombre, \"\\nApellido: \", socio.apellido, \"\\nDNI: \", socio.dni)",
"def Huachiclave(self):\n\n query = \"\"\"SELECT timestamp,huachiclave,cantidad,entregado FROM huachilate WHERE entregado = '0' ORDER BY timestamp\"\"\"\n\n query2 = \"\"\"INSERT INTO huachilate (timestamp,huachiclave,cantidad,entregado) VALUES (?,?,?,?)\"\"\"\n\n resultado = self.cursor.execute(query).fetchall()\n\n if resultado == []:\n \n timestamp = time.time()\n \n huachiclave = \"\".join(random.choices(string.ascii_letters + string.digits,k = 7))\n\n cantidad = random.randint(5000,50000)\n\n self.cursor.execute(query2,(timestamp,huachiclave,cantidad,0))\n\n self.conn.commit()\n\n return (timestamp,huachiclave,cantidad,0)\n \n else:\n return resultado[-1]",
"def get_scheduling_block_ids():\n ids = [key.split('/')[-1]\n for key in DB.keys(pattern='scheduling_block/*')]\n return sorted(ids)",
"def anterior(self,id):\n consulta = \"select * from socios m \" \\\n \"where m.idsocio = (select min(idsocio) from socios s \" \\\n \"where s.idsocio < %s);\"\n try:\n datos = AccesoDatos()\n cur = datos.conectar()\n cur.execute(consulta,(id))\n d = cur.fetchone()\n socio = Socio(d[1],d[2],d[3],d[0])\n datos.desconectar()\n except OperationalError as e:\n raise Exception(\"ERROR FATAL\")\n except Exception as a:\n raise Exception(\"Error al conectar a la base de datos\")\n print(\"ID : \", socio.id, \"\\nNombre: \", socio.nombre, \"\\nApellido: \", socio.apellido, \"\\nDNI: \", socio.dni)",
"def test_humangenes_id_get(self):\n pass",
"def _get_time_from_id(self) -> datetime:\n return datetime.fromtimestamp(int(self.id.split(' ')[0]) / 1e3)",
"def GetBlockByID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_scheduling_block(block_id):\n return json.loads(DB.get('scheduling_block/{}'.format(block_id)))",
"def ban_id(self):\n return self._ban_id",
"def _get_interval_id(t):\n if t < MIN_TIME_BUDGET:\n return 0\n\n if ((t-MIN_TIME_BUDGET) % BUDGET_TIME_INTVL) == 0:\n return int((t-MIN_TIME_BUDGET) % BUDGET_TIME_INTVL)\n\n return int((t-MIN_TIME_BUDGET) % BUDGET_TIME_INTVL) + 1",
"def getid(self, bno):\r\n return self.breakpt[bno]['id']",
"def get_block(height):\n r = requests.get(getBlockHeight + str(int(height)))\n if r.status_code != requests.codes.ok:\n return\n try:\n miner_id = r.json()['generator']\n block_id = r.json()['block']\n except KeyError:\n miner_id = None\n block_id = None\n\n return miner_id, block_id",
"def test_cms_block_repository_v1_get_by_id_get(self):\n pass",
"def __get_block_number(self):\n block_number = self.db.query(IDXPersonalInfoBlockNumber).first()\n if block_number is None:\n return 0\n else:\n return block_number.latest_block_number",
"def main():\n \n # Directorio donde guardaremos los bloques\n dir = 'archivos_formateados/'\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n # Variables inicializadas que usare\n inicio = 0\n final = 578704 #578703\n\n #Creacion de archivos .json semejantes a un diccionario\n for i in range (inicio,final):\n bloque = str(i)\n \n # Hacemos una request para obtener el bloque\n peticion = 'http://api.coinsecrets.org/block/' + bloque\n resp = requests.get(peticion)\n #time.sleep(1)\n datos_bloque = resp.json()\n\n # Abrimos un archivo y volcamos la informacion formateada\n with open(dir + bloque + '.json', 'w') as file_dict:\n json.dump(datos_bloque, file_dict, indent = 4)\n file_dict.close()\n\n print 'Bloque',bloque, 'almacenado'",
"def __init__(self,nombre,droga,obraSocial,plan,importe):\n self.nombreMedicamento = nombre\n self.droga = droga\n self.obraSocial = obraSocial\n self.plan = plan\n self.importe = importe\n self.fechaVenta = datetime.now().date()\n self.horaVenta = datetime.now().time()",
"def __init__(self, id_inc, asunto, descripcion):\n self.id_inc = id_inc\n self.asunto = asunto\n self.descripcion = descripcion\n self.estado = 'Abierta'",
"def get_bos_id(self):\n return self.sp.bos_id()",
"def mem_id(self) -> int:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns the occupied blocks at a center on a given date
|
def bloques_ocupados(self, centro_id, fecha):
return (
db.session.query(Bloque)
.join(Bloque.turnos)
.filter(
and_(
and_(Turnos.dia == fecha, Turnos.centro_id == centro_id),
and_(Turnos.turno_id == Bloque.id, Turnos.estado != "CANCELADO"),
)
)
.all()
)
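# Usage sketch: assumes Turnos rows join a Bloque (via turno_id) to a centro and a date,
# so any non-cancelled turno marks its block as taken; "repo" and the ids are hypothetical.
from datetime import date

ocupados = repo.bloques_ocupados(centro_id=1, fecha=date(2021, 6, 1))
ids_ocupados = {b.id for b in ocupados}  # block ids already booked at that center and date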
|
[
"def pedidosPorPeriodo(self,fechaIni,fechaFin):",
"def rangoFechas():\n anio = int(strftime(\"%Y\", gmtime()))\n mes = int(strftime(\"%m\", gmtime()))\n l = []\n for x in [0]:\n \n diff = mes - x\n if diff <= 0:\n l.append([anio - 1, 12+ diff])\n else:\n l.append([anio, diff])\n return l",
"def curvaBono(self, fecha):\n\n riesgo = self.get_riesgo()\n moneda = self.get_moneda()\n cn = self.get_cn()\n\n # Se usan curvas por parametros para los casos USD de todo riesgo y CLP de riesgo AAA\n if( ((riesgo == 'AAA' or riesgo == 'A') and moneda == 'CLP') or moneda == 'USD'):\n cb = \"SELECT * FROM [dbAlgebra].[dbo].[TdCurvaNS] WHERE Tipo = 'IF#\" + moneda + \"' AND Fecha = '\" + fecha + \"' ORDER BY Fecha ASC\"\n \n elif(riesgo == 'AA' and moneda == 'CLP'):\n cb = \"SELECT * FROM [dbAlgebra].[dbo].[TdCurvasSector] WHERE TipoCurva LIKE '%\" + moneda + \"#\" + riesgo + \"#Consolidado#Prepagables' AND Fecha = '\" + fecha + \"' ORDER BY Fecha ASC\"\n \n else:\n cb = \"SELECT * FROM [dbAlgebra].[dbo].[TdCurvasSector] WHERE TipoCurva LIKE '%\" + moneda + \"#\" + riesgo + \"#Corporativos#No Prepagables' AND Fecha = '\" + fecha + \"' ORDER BY Fecha ASC\"\n \n cb = pd.read_sql(cb, cn)\n if (cb.empty): raise Exception('Para la moneda ' + moneda + ' con riesgo ' + riesgo + ' no se encontró curva.')\n return cb",
"def cargaListaNomCamposBDA(self):\r\n lista=str.split(self.nomTabla,\".\")\r\n esquema=lista[0]\r\n tabla=lista[1]\r\n self.listaNomCampos=self.oUtiles.oConsultasPg.sacaNombresCamposTabla_lista(esquema, tabla)\r\n self.listaNomCampos=self.oUtiles.oUtilidadesListas.eliminaEltosLista(self.listaNomCampos,[\"geom\",\"archivo\"], False)",
"async def today(ctx):\n try:\n a = await requests.get(cam_url+stats_endpoint)\n except aiohttp.client_exceptions.ClientOSError:\n await ctx.send(\"```{}```\".format(error_unreachable))\n return\n a = await a.text()\n periods = []\n for line in a.split(\"\\n\"):\n if \"|\" in line:\n debut, fin = [l.strip() for l in line.split(\"|\")]\n debut = datetime.fromisoformat(debut)\n fin = datetime.fromisoformat(fin)\n if fin.date() == datetime.now().date():\n l = debut.strftime(\"%H:%M\")\n l += \" → \"\n l += fin.strftime(\"%H:%M\")\n periods.append(l)\n else:\n debut = datetime.fromisoformat(line)\n if debut.date() != datetime.now().date():\n l = debut.strftime(\"%d/%m/%y %H:%M\")\n else:\n l = debut.strftime(\"%H:%M\")\n l += \" → \"\n l += now_term\n periods.append(l)\n\n msg = \"\\n\".join(periods)\n if len(msg) != 0:\n e = Embed(title=occupation_title, description=msg)\n else:\n e = Embed(title=occupation_title,\n description=no_occupation)\n await ctx.send(embed=e)",
"def split_by_date(self):\n\t\tvacations = []\n\t\tif self.end_date==self.start_date:\n\t\t\tvacations.append(self)\n\t\telse:\n\t\t\tdelta = self.end_date - self.start_date\n\t\t\tfor day in range(0, delta.days+1):\n\t\t\t\tnew_date = self.start_date + timedelta(days=day)\n\t\t\t\tvac = Vacation(id=self.id, healer=self.healer, start_date=new_date, end_date=new_date)\n\t\t\t\tif day == 0:\n\t\t\t\t\tvac.start_time = self.start_time\n\t\t\t\t\tvac.end_time = 1440\n\t\t\t\telif day == delta.days:\n\t\t\t\t\tvac.start_time = 0\n\t\t\t\t\tvac.end_time = self.end_time\n\t\t\t\telse:\n\t\t\t\t\tvac.start_time = 0\n\t\t\t\t\tvac.end_time = 1440\n\t\t\t\tvacations.append(vac)\n\t\treturn vacations",
"def quadratura_ore(self):\n for email_dip, date_list in self.ore_decimali.iteritems():\n for log_date, ore_decimali in self.ore_decimali[email_dip].iteritems():\n ore_da_assegnare = int(ore_decimali)\n if ore_da_assegnare > 0:\n self.work_log[email_dip][log_date][0]['log_time'] = ore_da_assegnare + self.work_log[email_dip][log_date][0].get('log_time', 0)",
"def run(self):\n\n # Verifica se podemos iniciar o processamento do balcao\n with self.running_lock:\n if self.stopping or self.running:\n raise Exception(\"Balcao ainda em processamento\")\n\n self.running = True\n self.run_inicio = time.time()\n self.run_fim = None\n self.stopping = False\n\n while not self.stopping:\n # Verifica se tempos passageiros para atender\n if any(self.fila):\n with self.fila_lock:\n cur_passageiro = self.fila[0]\n else:\n # Este sleep so funciona quando nao temos fila, de outro modo nao existe interrupcao entre passageiros\n time.sleep(Balcao.DURACAO_MINUTO)\n continue\n\n # Calcula o tempo que vamos demorar a atender o passageiro\n velocidade_atendimento = random.randint(self.bags_min, self.bags_max)\n tempo_atendimento = cur_passageiro.bags / velocidade_atendimento * Balcao.DURACAO_MINUTO\n\n # Esperamos pela passagem desse tempo\n time.sleep(tempo_atendimento)\n\n hora_atendimento = time.time()\n\n # TODO: Apenas para debug\n minutos = int(tempo_atendimento // Balcao.DURACAO_MINUTO)\n segundos = int(tempo_atendimento % Balcao.DURACAO_MINUTO * 600)\n print(\"Balcao {:3} atendeu passageiro {:3} com {:3} sacos em {:02d}:{:02d}\".format(\n self.numero,\n cur_passageiro.numero,\n cur_passageiro.bags,\n minutos,\n segundos\n )\n )\n # TODO: fim de apenas para debug\n\n # Removemos o passageiro processado da lista de pendentes\n with self.fila_lock:\n self.fila.remove(cur_passageiro)\n\n # E acrescentamos a lista de processados, depois de colocar os dados do tempo de atendimento\n cur_passageiro.tempo_espera = hora_atendimento - cur_passageiro.entrada - tempo_atendimento\n cur_passageiro.tempo_atendimento = tempo_atendimento\n self.passageiros_atendidos.append(cur_passageiro)\n\n # Marca o fim do processamento da fila\n with self.running_lock:\n self.running = False\n self.run_fim = time.time()",
"def construye_gap_liquidez(banco):\n cuantos = 24 / sb.periodo\n \n if cuantos == 24: # periodo mensual\n gap = {0:banco.saldo(),\n 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0,\n 13:0, 14:0, 15:0, 16:0, 17:0, 18:0, 19:0, 20:0, 21:0, 22:0, 23:0, 24:0,\n 25:0, 26:0, 27:0, 28:0, 29:0, 30:0, 31:0, 32:0, 33:0, 34:0, 35:0, 36:0,\n 37:0, 38:0, 39:0, 40:0, 41:0, 42:0, 43:0, 44:0, 45:0, 46:0, 47:0, 48:0}\n elif cuantos == 8: # periodo trimestral\n gap = {0:banco.saldo(),\n 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0,\n 9:0, 10:0, 11:0, 12:0, 13:0, 14:0, 15:0, 16:0}\n else: # periodo semestral\n gap = {0:banco.saldo(),\n 1:0, 2:0, 3:0, 4:0,\n 5:0, 6:0, 7:0, 8:0}\n\n for op in banco.operaciones():\n vcto = op.vencimiento() - banco.tiempo_actual()\n if vcto > 0:\n gap[vcto] += op.flujo(True)\n def gap_liquidez(plazo = 0, acumulado = True):\n if plazo == 0:\n return gap\n else:\n if acumulado:\n acum = 0\n for i in range(0, plazo + 1):\n acum += gap[i]\n return acum\n else:\n return gap[plazo]\n return gap_liquidez",
"def complement(self, freeblock):\n copy = self.normalized()\n comp = Agenda()\n desc = freeblock.desc\n cur_time = freeblock.begin #arrow date and time \n for appt in copy.appts:\n if appt < freeblock:\n continue\n if appt > freeblock:\n if cur_time < freeblock.end:\n comp.append(Appt(cur_time,freeblock.end, desc))\n cur_time = freeblock.end\n break\n if cur_time < appt.begin:\n # print(\"Creating free time from\", cur_time, \"to\", appt.begin)\n comp.append(Appt(cur_time, appt.begin, desc))\n cur_time = max(appt.end,cur_time)\n \n if cur_time < freeblock.end:\n # print(\"Creating final free time from\", cur_time, \"to\", freeblock.end)\n comp.append(Appt(cur_time, freeblock.end, desc))\n return comp",
"def comprueba_temporal(hora, n_horas, umbrales, clima):\r\n # Se comprueba si hay un temporal antes de n_horas para los umbrales\r\n # introducidos\r\n comprobacion = {}\r\n\r\n for j in umbrales:\r\n n = 0\r\n # Datos de clima en las proximas n_horas para el agente j\r\n data_nhoras = clima[j][hora:hora+n_horas]\r\n # Añado un +1 para que considere el último valor porque Python\r\n # considera el conjunto [) no inlcuyendo el último valor\r\n for k in data_nhoras:\r\n if k > umbrales[j]['valor']:\r\n n += 1\r\n\r\n if n >= umbrales[j]['dur'] and not data_nhoras.empty:\r\n comprobacion[j] = c.TEMPORAL_ANTES_N_HORAS\r\n\r\n # Si no hay temporal antes de n_horas, se comprueba si hay un temporal\r\n # justo en n_horas\r\n else:\r\n if clima[j][hora+n_horas] > umbrales[j]['valor']:\r\n # Del mismo modo añado un +2 al final del vector temp_nhoras\r\n # para que considere el ultimo valor\r\n temp_nhoras = clima[j][hora+n_horas:hora+n_horas+umbrales[j]['dur']]\r\n val = (temp_nhoras > umbrales[j]['valor'])\r\n if val.all():\r\n comprobacion[j] = c.TEMPORAL_N_HORAS\r\n else:\r\n comprobacion[j] = c.NO_TEMPORAL\r\n else:\r\n comprobacion[j] = c.NO_TEMPORAL\r\n\r\n return comprobacion",
"def tendencias(self):\n \n # Determina el cruce dorado\n m_=self.get_sma(50)\n l_=self.get_sma(200)\n l=l_[-1]\n m=m_[-1]\n self.tendencia_ascendente= False\n self.tendencia_descendente= False\n if l<m:\n self.golden_cross= True\n if (m/l-1)*100>10:\n self.tendencia_ascendente= True\n self.tendencia_descendente= False\n else:\n self.golden_cross= False\n if -(m/l-1)*100>10:\n self.tendencia_ascendente= False\n self.tendencia_descendente= True",
"def start(self):\n self.colisiones = [[0,0],[0,0],[0,0]] #X(a,b), Y(a,b), Z(a,b) --> A ------- B\n t = 0 \n self.VxA = [] #Listas que indican que particulas han chocado con la pared(pueden repetirse)\n self.VxB = []\n self.VyA = []\n self.VyB = []\n self.VzA = []\n self.VzB = []\n while t <= self.t:\n for i in range(len(self.particulas)):\n self.particulas[i].xyz[0] += self.dt * self.particulas[i].v[0]\n self.particulas[i].xyz[1] += self.dt * self.particulas[i].v[1]\n self.particulas[i].xyz[2] += self.dt * self.particulas[i].v[2]\n x = self.particulas[i].hit_x(self.cubo.a,self.cubo.b) # [1,0] si hit A/ [0,1] si hit B\n y = self.particulas[i].hit_y(self.cubo.a,self.cubo.b)\n z = self.particulas[i].hit_z(self.cubo.a,self.cubo.b)\n self.colisiones[0][0] += x[0]\n self.colisiones[0][1] += x[1]\n self.colisiones[1][0] += y[0]\n self.colisiones[1][1] += y[1]\n self.colisiones[2][0] += z[0]\n self.colisiones[2][1] += z[1]\n if x[0] == 1:#Debido a esto la cantidad de memoria usada es mayor en cada unidad de tiempo. \n self.VxA.append(self.particulas[i].v[0])\n if x[1] == 1:\n self.VxB.append(self.particulas[i].v[0])\n if y[0] == 1:\n self.VyA.append(self.particulas[i].v[1])\n if y[1] == 1:\n self.VyB.append(self.particulas[i].v[1])\n if z[0] == 1:\n self.VzA.append(self.particulas[i].v[2])\n if z[1] == 1:\n self.VzB.append(self.particulas[i].v[2])\n t += self.dt\n \n self.colisionesT = 0 #Colisiones totales\n for i in range(3):\n for k in range(2):\n self.colisionesT += self.colisiones[i][k]",
"def fix_days_from_old_cl_repo(td, sday, eday, old_cl_repo) :\n ti = l1.TradingDayIterator(sday)\n day1 = ti.yyyymmdd()\n barr = []\n tda = []\n col = []\n\n TRADING_HOURS = 23\n end_hour = 17\n bar_sec = 5\n\n while day1 <=eday :\n if day1 not in td :\n print \"read \", day1, \" from olc_cl_repo\"\n utc_e = int(l1.TradingDayIterator.local_ymd_to_utc(day1, end_hour,0,0))\n utc_s = utc_e - TRADING_HOURS*3600\n y = int(day1[:4]) - 1998\n old_cl_bar = old_cl_repo[y]\n i=np.searchsorted(old_cl_bar[:, 0], float(utc_s+bar_sec)-1e-6)\n j=np.searchsorted(old_cl_bar[:, 0], float(utc_e)-1e-6)\n N = (utc_e-utc_s)/bar_sec\n if j - i + 1 == N :\n barr.append(old_cl_bar[i:j+1, :])\n tda.append(day1)\n col.append(repo.kdb_ib_col)\n print 'get from old_cl_repo '\n else :\n print 'cannot find ', utc_s, ' to ', utc_e, ' from the old_cl_repo, skipping'\n\n ti.next()\n day1=ti.yyyymmdd()\n return barr, tda, col",
"def turno_centro_fecha(self, centro_id, fecha):\n return Turnos.query.filter(\n and_(Turnos.dia == fecha, Turnos.centro_id == centro_id)\n ).all()",
"def analizar_bomberman(self):\n conuslta_al_bm_por_posicion = self.bomberman.get_pos()\n consultar_id_casilla_del_bomberman = self.bomberman.get_id_casilla()\n # Le pregunta al bomberman donde está\n for i in range(0, len(self.estallidos)):\n respuesta_de_estallido = self.estallidos[i].comparacion(conuslta_al_bm_por_posicion)\n \n if respuesta_de_estallido == 1 and consultar_id_casilla_del_bomberman != [0,0] and consultar_id_casilla_del_bomberman != [0,1] and consultar_id_casilla_del_bomberman != [1,0]:\n self.bomberman.respawnear()\n\n for index,pos in enumerate(self.white_walkers):\n consultar_id_casilla_de_ww = self.white_walkers[index].get_id_casilla()\n\n if consultar_id_casilla_de_ww == consultar_id_casilla_del_bomberman and consultar_id_casilla_del_bomberman != [0,0] and consultar_id_casilla_del_bomberman != [0,1] and consultar_id_casilla_del_bomberman != [1,0]:\n self.bomberman.respawnear()\n\n portals = self.portal[0].get_id_casilla()\n bomermans = self.bomberman.set_id_casilla()\n if portals == bomermans and len(self.white_walkers) == 0:\n print('Ganastes, pasas al siguiente nivel: ',self.nivel_o_dificultad + 1)\n self.controlador.resetear_nivel(self.nivel_o_dificultad + 1)",
"def rebond():\n global balle, compteur\n x0, y0, x1, y1 = canvas.coords(balle[0])\n if x0 <= 0 or x1 >= 600:\n balle[1] = -balle[1]\n compteur += 1\n print(compteur)\n if y1 <= 0:\n canvas.coords(balle[0], (x0, 360, x1, 400))\n if y0 >= 400:\n canvas.coords(balle[0], (x0, -40, x1, 0))",
"def posicion_de_bombas(self):\n if len(self.bombas) != 0:\n for i in range(0,len(self.bombas)):\n self.list_de_coords_aux_2.append(self.bombas[i].set_coords())\n\n return self.list_de_coords_aux_2",
"def realizar_movimiento(nueva_posicion_cabeza, tablero, posicion_serpiente, posicion_fruta, especial_largo):\n\tcolor_verde = '\\033[92m'\n\tcolor_normal = '\\033[0m'\n\tposicion_serpiente.insert(0, nueva_posicion_cabeza)\n\tif posicion_fruta != posicion_serpiente[0] and especial_largo != 1: #no llamo al metodo remover cola si se uso el especial de crecer y no si comio fruta\n\t\tremover_cola_vieja(tablero, posicion_serpiente)\t\n\tif especial_largo == -2: #removemos dos veces la cola si se uso el especial de decrecer 2\n\t\tfor i in range(2): remover_cola_vieja(tablero, posicion_serpiente)\t\n\tfor parte in posicion_serpiente:\n\t\tif parte is posicion_serpiente[0]: tablero[parte[0]][parte[1]] = color_verde + \"o\" + color_normal; continue\n\t\ttablero[parte[0]][parte[1]] = color_verde + \"#\" + color_normal"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns all centers
|
def all(self):
centros = Centro.query.all()
return centros
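# Usage sketch: plain fetch of every Centro row; "repo" is a hypothetical caller object.
for centro in repo.all():
    print(centro.id, centro.nombre)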
|
[
"def melhores_alunos_de_todos():\n mensagem = '========= MELHORES ALUNOS ========='\n print(f'\\n\\033[1;31m{mensagem}\\033[m')\n for nome_aluno in melhores_alunos:\n print(f\"\\033[0;34m{nome_aluno.center(len(mensagem))}\\033[m\")\n print(f'\\033[1;31m{\"=\" * len(mensagem)}\\033[m')",
"def center(self):\n for atoms in self:\n atoms.center()",
"def center(cloud):\n c = sum(cloud) / len(cloud)\n # ===========================================================================\n # print c\n #===========================================================================\n return [p - c for p in cloud]",
"def __update_centers(self):\n \n centers = [[] for i in range(len(self.__clusters))];\n \n for index in range(len(self.__clusters)):\n point_sum = [0] * len(self.__pointer_data[0]);\n \n for index_point in self.__clusters[index]:\n point_sum = list_math_addition(point_sum, self.__pointer_data[index_point]);\n \n centers[index] = list_math_division_number(point_sum, len(self.__clusters[index]));\n \n return centers;",
"def aprobados_paginado(self, page, per_page):\n centros = Centro.query.filter(\n and_(Centro.estado == \"Aceptado\", Centro.activo == True)\n ).paginate(page=page, per_page=per_page, error_out=False)\n return centros",
"def imprimir_camino(padres,min_indice,min_ciudad,distancias):\n padre = padres[min_indice]\n distancia = distancias[min_indice]\n\n rta = [min_ciudad]\n\n prox = min_ciudad\n while prox != None:\n rta.append(padre[prox])\n prox = padre[prox]\n if rta[-1] == None: rta.pop()\n\n print(FLECHA.join(rta[::-1]))",
"def centers(self, sort=True):\n if sort:\n centers = sorted(map(list, self.clusters))\n else:\n centers = list(map(list, self.clusters))\n return centers",
"def get_datacenters_list():\n return util.get(abs_link=False)",
"def get_cluster_centers(self):\n pass",
"def getAllCenters(left, right):\n\tleftCenters = []\n\trightCenters = []\n\ti = 0\n\twhile(i < len(left)):\n\t\tcentersLeft = getCenters(cv2.imread(left[i], 0))\n\t\tcentersRight = getCenters(cv2.imread(right[i], 0))\n\t\tleftCenters.append(centersLeft)\n\t\trightCenters.append(centersRight)\n\t\ti += 1\n\treturn (leftCenters, rightCenters)",
"def clusterizar_camaras():\n if connection.is_connected():\n cur = connection.cursor()\n q = \"SELECT * FROM CamarasTrafico;\"\n cur.execute(q)\n\n data = cur.fetchall()\n\n for d in data:\n\n id = d[0]\n longitud = d[2]\n latitud = d[3]\n\n cluster = coordenadas_a_cluster(longitud, latitud, modelo)\n\n sql = f'UPDATE CamarasTrafico SET cluster = {cluster} WHERE id_camara = {id};'\n\n cur.execute(sql)\n\n connection.commit()\n\n print(id, cluster)",
"def poblar_cluster():\n\n lista = modelo.cluster_centers_.tolist()\n\n if connection.is_connected():\n cur = connection.cursor()\n i = 0\n for l in lista:\n longitud = l[0]\n latitud = l[1]\n\n sql = f'UPDATE Cluster SET longitud={longitud}, latitud={latitud} WHERE id_cluster={i}'\n\n cur.execute(sql)\n\n connection.commit()\n i += 1",
"def usuarios_conectados():\n\n global my_user\n print(\"Actualizando clientes conectados.\")\n usuarios = api.get_AllUser()\n lista_usarios = []\n\n for user in usuarios:\n if user['Estado'] == '1':\n # Anadimos todos los users menos el propio.\n if user['Nombre'] != my_user:\n lista_usarios.append(user['Nombre'])\n\n if len(lista_usarios) == 0:\n lista_usarios = ['- Vacio -']\n\n return lista_usarios",
"def docker_service_cloud_list(self):\n dockerserverobjs = Cloudmeshdocker.objects()\n if len(dockerserverobjs) == 0:\n print(\"No cloud exist yet\")\n return\n\n print(\"Listing existing cloud..\")\n for server in dockerserverobjs:\n print(server.dockerserver)",
"def centre(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.center\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_list(val, float, float, float)",
"def elimina_todas_citas_ap_a(self,autor):\n for lista in self.citas:\n for dic in lista:\n for c, v in dic.items():\n if c == autor:\n self.citas.remove(lista)\n return True",
"def organizations():",
"def _updateCentroids(self) -> None:\n self.centroids_OLD = self.centroids_NEW[self.centroids_NEW[:, 2] >= 0, :2]\n self.centroids_NEW = None",
"def get_cluster_centers(self):\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns all approved centers, paginated
|
def aprobados_paginado(self, page, per_page):
centros = Centro.query.filter(
and_(Centro.estado == "Aceptado", Centro.activo == True)
).paginate(page=page, per_page=per_page, error_out=False)
return centros
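# Usage sketch: Flask-SQLAlchemy's paginate() returns a Pagination object, so callers read
# .items and the page counters instead of a plain list; "repo" is a hypothetical caller.
pagina = repo.aprobados_paginado(page=1, per_page=10)
for centro in pagina.items:
    print(centro.nombre)
print(pagina.page, "/", pagina.pages, "-", pagina.total, "approved centers in total")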
|
[
"def listaProyectos_a_iniciar(self,page=1):\n try:\n proy = DBSession.query(Proyecto).filter_by(iniciado=False).order_by(Proyecto.id_proyecto)\n usuario = DBSession.query(Usuario).filter_by(nombre_usuario=request.identity['repoze.who.userid']).first()\n proyectos=[]\n for p in proy:\n if usuario.proyectos.count(p)==1:\n proyectos.append(p)\n currentPage = paginate.Page(proyectos, page, items_per_page=10)\n except SQLAlchemyError:\n flash(_(\"No se pudo acceder a Proyectos! SQLAlchemyError...\"), 'error')\n redirect(\"/admin\")\n except (AttributeError, NameError):\n flash(_(\"No se pudo acceder a Proyectos! Hay Problemas con el servidor...\"), 'error')\n redirect(\"/admin\")\n\n return dict(proyectos=currentPage.items, page='listaProyectos_a_iniciar', currentPage=currentPage)",
"def get_centers_information(self):\n self.driver.get(self.url)\n disagree_button = self.driver.find_element_by_id(\"didomi-notice-disagree-button\")\n disagree_button.click()\n time.sleep(0.5)\n\n for page_index in range(4):\n page_buttons = self.driver.find_elements_by_css_selector(\"a.seo-magical-link\")\n page_button = page_buttons[page_index]\n page_button.click()\n centers = self.driver.find_elements_by_css_selector(\"div.dl-search-result\")\n for center in centers:\n try: # Pharmacie pas encore prise en compte, format de \"informations\" different\n center_title = center.find_element_by_css_selector(\"div.dl-search-result-title\")\n center_calendar = center.find_element_by_css_selector(\"div.dl-search-result-calendar\")\n center_informations = center.find_element_by_css_selector(\"div.dl-search-result-content\")\n center_url = center.find_element_by_css_selector(\"a.dl-button-primary.dl-button.js-search-result-path\").get_attribute(\"href\")\n t_calendar = time.time() + TIME_OUT\n while not center_calendar.text:\n center_calendar = center.find_element_by_css_selector(\"div.dl-search-result-calendar\")\n center_calendar.location_once_scrolled_into_view # returns dict of X, Y coordinates\n if time.time() > t_calendar:\n break\n while not center_informations.text:\n center_informations = center.find_element_by_css_selector(\"div.dl-search-result-specialities\")\n center.informations.location_once_scrolle_into_view\n \n center_name, center_type = self.format_title(center_title.text)\n calendar = self.format_date(center_calendar.text)\n location, vaccination_rooms = self.format_informations(center_informations.text)\n \n self.centers_dict[center_name] = {\n \"type\": center_type,\n \"vaccination rooms\": vaccination_rooms,\n \"calendar\": calendar,\n \"location\": location,\n \"url\": center_url,\n }\n except:\n pass\n \n self.driver.quit()",
"def retrieve_cursantes(self, token, carrera, anio=None):\n headers = self.get_headers(token)\n url = app.config['CURSANTES_URL'].format(carrera) \n response = requests.get(url + str(anio) + '/' if anio else url, headers=headers)\n if response.status_code == 200:\n return response.text\n else:\n return []",
"def ls(self, prefix='', offset=0, limit=20):\n if not prefix:\n prefix = ''\n\n params = {\n 'action': 'query',\n 'list': 'allpages',\n 'apprefix': prefix,\n 'aplimit': limit + 1\n }\n params.update(DEFAULT_PARAMS)\n resp = json.loads(self.opener.open(API_ROOT, urllib.urlencode(params)).read())\n page_json = resp['query']['allpages']\n page_list = [p['title'] for p in page_json]\n return page_list",
"def _get_all_pages(self, path, params):\n merged_pages = []\n skip = 0\n while 1:\n params['$skip'] = skip\n body = self._transport.get(path, params)\n merged_pages.extend(body)\n if len(body) < self.MAX_PAGE_LENGTH:\n break\n skip += self.MAX_PAGE_LENGTH\n return merged_pages",
"def do_pagination(self, request, queryset):\n limit_max = getattr(settings, 'WAGTAILAPI_LIMIT_MAX', 20)\n\n try:\n offset = int(request.GET.get('offset', 0))\n assert offset >= 0\n except (ValueError, AssertionError):\n raise BadRequestError(\"offset must be a positive integer\")\n\n try:\n limit = int(request.GET.get('limit', min(20, limit_max)))\n\n if limit > limit_max:\n raise BadRequestError(\"limit cannot be higher than %d\" % limit_max)\n\n assert limit >= 0\n except (ValueError, AssertionError):\n raise BadRequestError(\"limit must be a positive integer\")\n\n start = offset\n stop = offset + limit\n\n return queryset[start:stop]",
"def products_organic(request):\n products = Product.objects.filter(category=\"4\")\n paginator = Paginator(products, 24)\n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'products_organic.html', {'products': products,'page_obj': page_obj})",
"def recargarDominiosDenegados(self):\n modulo_logger.log(logging.DEBUG, \"Recargando dominios denegados\")\n self.dominios_denegados = []\n respuesta = self.cursor.execute(\n 'select url from dominios_denegados where usuario=?', (self.id, )\n ).fetchall()\n for fila in respuesta:\n self.dominios_denegados.append(fila[0])",
"def paginate(docs, per_page=10):\n return [docs[i: i + per_page] for i in range(0, len(docs), per_page)]",
"def paging_results(self):\n\n return 10",
"def total_de_paginas(self):\n if len(self.documentos):\n return ceil(\n self.dados.get(\"num_docs\", 0)\n / self.quantidade_documentos_por_pagina\n )\n return 0",
"def __paginate_x_offsets(self, colsperpage):\n #fix soon. should not use .level\n liloffset = self.report_opts.littleoffset\n x_page_offsets = {0:0} #change me to [] ???\n for box in self.boxes:\n x_index = box.level[0]\n x_page = x_index // colsperpage\n if x_page not in x_page_offsets and x_index % colsperpage == 0:\n x_page_offsets[x_page] = box.x_cm - liloffset\n if x_page >= self.x_pages:\n self.x_pages = x_page+1\n return x_page_offsets",
"def __pago_retrasado(self):\n\n # Disminuir en un mes los pagos requeridos para dar un periodo de gracia\n # debido a retrasos o errores en el sistema, incluyendo aquellos\n # ocasionados por instituciones externas\n monto_proyectado = self.payment * (self.prediccion_pagos_actuales() - 1)\n return monto_proyectado - self.pagado()",
"def all_products(request):\n products = Product.objects.all()\n paginator = Paginator(products, 24)\n\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'products.html', {'page_obj': page_obj, 'products': products})",
"def recargarDominiosPublicamenteDenegados(self):\n modulo_logger.log(logging.DEBUG,\n \"Recargando dominios publicamente denegados\")\n conexion = sqlite3.connect(config.PATH_DB)\n cursor = conexion.cursor()\n self.dominios_publicamente_denegados = []\n respuesta = cursor.execute(\n 'select url from dominios_publicamente_denegados'\n ).fetchall()\n for fila in respuesta:\n self.dominios_publicamente_denegados.append(fila[0])\n conexion.close()",
"def recargarDominiosPermitidos(self):\n modulo_logger.log(logging.DEBUG, \"Recargando dominios permitidos\")\n self.dominios_permitidos = []\n respuesta = self.cursor.execute(\n 'select url from dominios_permitidos where usuario=?', (self.id, )\n ).fetchall()\n for fila in respuesta:\n self.dominios_permitidos.append(fila[0])",
"def paginate(self):\n return self._paginate",
"def parse_overview_page1(self, response):\n\t\tcomm = response.meta['comm'] # the private/commercial indicator\n\t\t#cityid = response.meta['cityid'] # the id of the city of which we look for the ads (as string)\n\t\t# find the number of pages in total and open all other pages from 1,...,last page\n\t\tif len(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong')) > 1:\n\t\t\tnumpages = int(response.xpath('//li[@class=\"pageno\"]/a[@class=\"nothing\"]/strong[2]/text()').extract()[0])\n\t\t\tfor pageno in xrange(1,numpages+1):\n\t\t\t\t# we have to re-post our form for the filter settings\n\t\t\t\t#request = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno), 'cityid': cityid},\n\t\t\t\t#\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest = FormRequest.from_response(response, formdata={'classtype': 'of', 'comm': str(comm), 'pageno': str(pageno)},\n\t\t\t\t\t\t\t\t\t\t\t\t\tcallback=self.parse_overview_page2)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\t\t\t# find the immoscout ads for this site\n\t\t\t\trequest = scrapy.Request('http://www.quoka.de/qs/qpc/xmlSearch.php?search=&view=quoka&platform=desktop&catid=27_2710&maxresults=20&page=' +str(pageno)+\n\t\t\t\t\t\t\t\t\t\t'&output=json&oe=UTF-8', callback=self.parse_immoscout)\n\t\t\t\trequest.meta['comm'] = comm\n\t\t\t\tyield request\n\t\telse:\n\t\t\t# in this case there is no \"Seite 1 von n\", so we simply scrape this page\n\t\t\trequest = scrapy.Request(response.url, callback=self.parse_overview_page2)\n\t\t\trequest.meta['comm'] = comm\n\t\t\tyield request",
"def paginate(self, paginate):\n self._paginate = paginate"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
validates that the center does not already exist before adding it to the database
|
def validate_centro_creation(self, nombre, direccion, municipio):
centro = Centro.query.filter(
and_(
and_(Centro.municipio == municipio, Centro.direccion == direccion),
Centro.nombre == nombre,
)
).first()
return centro
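# Usage sketch: the method returns the already-stored Centro (truthy) when the same
# name/address/municipio combination exists, or None when it is safe to insert.
# The literal values and the Centro constructor keywords below are hypothetical.
duplicado = repo.validate_centro_creation("Centro Norte", "Calle 123", "La Plata")
if duplicado is None:
    db.session.add(Centro(nombre="Centro Norte", direccion="Calle 123", municipio="La Plata"))
    db.session.commit()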
|
[
"def check_db_entry(self):\n raise NotImplementedError",
"def testCampoNombreLineaBase(self):\n try:\n self.linea_base.nombre = None\n self.DBSession.flush()\n except IntegrityError:\n pass\n else:\n self.fail(\"Se esperaba un IntegrityError! Verificar...\")",
"def _check_legal_index(self, row, col):\n return 0 <= row and row < self._size and\\\n 0 <= col and col < self._size",
"def build_inicial(self,TARGET,build):\r\n if build==\"tablespace_dados\":\r\n try:\r\n cursor = self.new_cursor_dbmon()\r\n cursor.execute(\"insert into ora_h_tablespace select * from ora_tablespace where target=:vTARGET\",vTARGET=TARGET)\r\n self.conn.commit()\r\n cursor.execute(\"delete from ora_tablespace where target=:vTARGET\",vTARGET=TARGET)\r\n self.conn.commit()\r\n except cx_Oracle.DatabaseError as e:\r\n error, = e.args\r\n #Gravando erro target,alarme,descricao\r\n print(\"[!!! ERRO !!!][db_dbmon][processa_tablespace][\" + TARGET + \"] \" + str(error.code) + str(error) )\r\n return(0)",
"def CheckFirstTime(self, data):\n cursor = self._conn.cursor()\n sqli = \"\"\"select * from attendance where cedula = %s and fecha = %s and hora = %s and estado = %s\"\"\"\n #logging.info(\"Ejecutando query %s\" % sqli)\n #logging.info(\"datos %s,%s,%s, %s\" % data)\n try:\n cursor.execute(sqli, data)\n self._conn.commit()\n except:\n self._conn.rollback()\n cursor.execute(sqli, data)\n self._conn.commit()\n \n result = cursor.fetchall()\n\n if result:\n #logging.info(\"el dato ya existe en la base de datos %s, %s, %s, %s\" % data)\n return False\n else:\n #logging.info(\"primera instancia del dato en la base de datos %s, %s, %s, %s\" % data)\n return True",
"def validate_tipo_centro(tipo):\n if tipo == \"0\":\n flash(\"No ha seleccionado un tipo de centro válido\")\n return False\n else:\n return True",
"def ExisteRelacion(self,dr,usuario):\n bRetorno=False\n query=db.GqlQuery(\"select * from Relacion where usuario=:1 and doctor=:2\",usuario, dr)\n if query.count()>0:\n bRetorno=1\n else: #no existe, entonces valido que NO sea un dr\n instanciaDr=ValidoDoctor()\n if instanciaDr.ExisteDr(usuario.usuario):#es un Dr. no se puede grabar ese tipo de relaciones, es pecado\n bRetorno=True\n return bRetorno",
"def check_room_exists(self, row, col):\n return 0 <= row < self.__rowCount and 0 <= col < self.__colCount",
"def testJotdNotEmpty(self):\n curs.execute(\"select count(*) from jotd;\")\n messageCt = curs.fetchone()[0]\n self.assertGreater(messageCt, 0, \"jotd Table is empty\")",
"def test_insert_counselor__empty(self):\n self.cmd.insert_counselor({})\n objs = CounselingAgency.objects.all()\n self.assertTrue(len(objs) == 0)",
"def check_db(self):\n if not os.path.exists(self.db_base_path):\n raise DatabaseDoesNotExist",
"def validate_table(self, table):\n print(\"Check if {} table is available in the database ...\".format(table))\n try:\n self.execute_psql_command('''SELECT * FROM {};'''.format(table), False)\n except psycopg2.Error:\n if table == self._db_data_table:\n print(\"{} table is missing from database\".format(table))\n self.table_creation(table)",
"def test_missingDay(self):\n with self.settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok'):\n spot = Spot.objects.create(name='testing hours')\n has_error = False\n try:\n hours = SpotAvailableHours.objects.create(spot=spot, start_time=\"01:30\", end_time=\"02:30\")\n except:\n has_error = True\n\n self.assertEquals(has_error, True, \"Doesn't allow hours to be stored without a day\")",
"def has_valid_barcode(cursor):\n\n query = \"SELECT COUNT(item_barcode) FROM fact_spl_book_checkout WHERE LENGTH(item_barcode) != 13\"\n cursor.execute(query)\n row = cursor.fetchone()\n\n assert row[0] == 0, \"Item barcode should be 13 digits long\"",
"def _check_for_emptyroom(self,ckb,pdf):\n if self.emptyroom:\n if pdf.get(\"emptyroom\"):\n ckb.SetForegroundColour(self._emptyroom_colour)\n ckb.SetToolTip(ckb.GetToolTip().GetTip() + \"\\n maked as <empty room> file\")\n return True\n ckb.SetForegroundColour(wx.BLACK)\n return False",
"def find_center(self):\n return False",
"def test_missingStart(self):\n with self.settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok'):\n spot = Spot.objects.create(name='testing hours')\n has_error = False\n try:\n hours = SpotAvailableHours.objects.create(spot=spot, day=\"m\", end_time=\"01:30\")\n except:\n has_error = True\n\n self.assertEquals(has_error, True, \"Doesn't allow hours to be stored without a start time\")",
"def test_find_by_reg_id_invalid(session):\n collateral = VehicleCollateral.find_by_registration_id(300000000)\n assert not collateral",
"def _checkFdbEntryMiss(self):\n result = self.duthost.command(\"show mac\")\n out = result['stdout']\n pytest_assert(self.arp_entry[self.dst_ip].lower() not in out.lower(), \"{} present in FDB\"\n .format(self.arp_entry[self.dst_ip]))\n logger.info(\"'{}' not present in fdb as expected\".format(self.arp_entry[self.dst_ip]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
updates the center's published flag to True or False (publish/unpublish)
|
def update_publicado(self, centro_id, publicado):
centro = Centro().find_by_id(centro_id)
if publicado == "True":
centro.publicado = True
else:
centro.publicado = False
db.session.commit()
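# Usage sketch: note the comparison against the string "True", so this expects the raw
# form/query-string value rather than a boolean; "repo" and the id are hypothetical.
repo.update_publicado(centro_id=7, publicado="True")   # publish
repo.update_publicado(centro_id=7, publicado="False")  # unpublish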
|
[
"def IsPublic(self) -> bool:",
"def IsNotPublic(self) -> bool:",
"def find_center(self):\n return False",
"def idudeddvivacanampragrhyam(self):\n self.Pragrhya = False\n # PMS: 1.1.11. IdUdeddvivacanam pragfhyam\n if self.External and self.Pada1 in pragrhya_set:\n self.Pragrhya = True",
"def is_public(self):\n\n return (self.status == STATUS_PUBLISHED and \\\n self.pub_date <= datetime.utcnow())",
"def dominioPublicamentePermitido(self, url):\n try:\n dominio = url.split('/')[2]\n return dominio in self.dominios_publicamente_permitidos\n except:\n modulo_logger.log(logging.ERROR,\n \"Error al tratar de obtener el dominio desde la url: %s\" % url)\n return True",
"def isCenterSet(self) -> \"SbBool\":\n return _coin.SoBoundingBoxCache_isCenterSet(self)",
"def _start_point_is_set(self):\n if self.circle_profile.center.x == 0:\n return False\n else:\n return True",
"def runs_pubd(self):\n return self.is_root or not (self.is_hosted or only_one_pubd)",
"def set_publicity(obj, public):\n if public: obj.Acl().put(ACL='public-read')\n else: obj.Acl().put(ACL='private')",
"def isCenterSet(self) -> \"SbBool\":\n return _coin.SoGetBoundingBoxAction_isCenterSet(self)",
"def is_hosted(self):\n return self.hosted_by is not None",
"def isPublic(self, *args) -> \"SbBool\":\n return _coin.SoNodekitCatalog_isPublic(self, *args)",
"def is_public(self):\n return isinstance(self._key, Public) and not isinstance(self._key, Private)",
"def dominioPublicamenteDenegado(self, url):\n try:\n dominio = url.split('/')[2]\n return dominio in self.dominios_publicamente_denegados\n except:\n modulo_logger.log(logging.ERROR,\n \"Error al tratar de obtener el dominio desde la url: %s\" % url)\n return False",
"def is_public(self):\n return self.get_published_images().exists()",
"def internal(self):\n return self.remote == site_config.params.INTERNAL_REMOTE",
"def cmd_papublic(self, data, client, cmd=None):\n if not data or data not in ('on', 'off'):\n client.message('^7Invalid or missing data, try !help papublic')\n return\n\n if data == 'on':\n self.console.setCvar('g_password', '')\n self.console.say('^7public mode: ^2ON')\n self.console.queueEvent(self.console.getEvent('EVT_CLIENT_PUBLIC', '', client))\n\n elif data == 'off':\n newpassword = self._papublic_password\n if self._pass_lines is not None:\n i = random.randint(0, len(self._pass_lines) - 1)\n newpassword = self._pass_lines[i]\n\n for i in range(0, self._randnum):\n newpassword += str(random.randint(1, 9))\n\n self.debug('private password set to: %s' % newpassword)\n\n if newpassword is None:\n client.message('^1ERROR: ^7could not set public mode off because \\\n there is no password specified in the config file')\n return\n\n self.console.setCvar('g_password', '%s' % newpassword)\n self.console.say('^7public mode: ^1OFF')\n client.message('^7password is \\'^4%s^7\\'' % newpassword)\n client.message('^7type ^5!mapreload^7 to apply change')\n self.console.write('bigtext \"^7Server going ^3PRIVATE ^7soon!!\"')\n self.console.queueEvent(self.console.getEvent('EVT_CLIENT_PUBLIC', newpassword, client))",
"def isPhysical(self,uid):\n return( self.id2node[uid].group==\"Physical\" )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
deletes a center by setting it to activo=False
|
def eliminar(self, id):
turno = Turnos().turno_centro(id)
for x in turno:
x.estado = "CANCELADO"
centro = Centro().find_by_id(id)
centro.activo = False
db.session.commit()
return centro
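# Usage sketch: a soft delete; the center's pending turnos are marked CANCELADO and the
# center is flagged inactive, but the row itself is kept; "repo" and the id are hypothetical.
centro = repo.eliminar(5)
assert centro.activo is False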
|
[
"def eliminar_pieza(self, cantidad_a_eliminar):\n pass",
"def suppressionfich(self):\n\n os.remove(self.chfich)",
"def removerRoupa(self):\n\n id_roupa = super().validarInteiro(self=Cadastro, mensagem=\"Digite o id da roupa a ser pesquisada: \")\n \n super().executarQuery(self=Cadastro, query=\"DELETE FROM roupas WHERE id_roupa = {:d}\".format(id_roupa))",
"def elimina_squadra(self, event):\n self.Disable()\n ViewEliminaSquadra(parent=self, title='Elimina Squadra')",
"def eliminar(self):\r\n #muestra los reistros actuales\r\n resgistros_estatus=self.obj_conexion.mostrar_registros()\r\n if resgistros_estatus:\r\n print(Fore.GREEN+\" Lista de registros actuales\"+Fore.RESET)\r\n print(Fore.LIGHTMAGENTA_EX+str(self.obj_conexion.mostrar_registros())+Fore.RESET)\r\n #pedirle al usuario que ingrese el nombre del equipo a eliminar\r\n nombre=input(Fore.YELLOW+\"Ingresa el nombre del equipo a eliminar: \"+Fore.RESET)\r\n #llama a la funcion eliminar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.eliminar_registro(nombre)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro eliminado correctamente\\n\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no eliminado, no se encontro coeincidencias con lo ingresado\"+Fore.RESET,Back.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" No hay registros en el base de datos, Debe agregar un registro primero\"+Fore.RESET,Back.RESET)\r\n opcion_valida=input(Fore.YELLOW+\" Desea crear un nuevo registro? (S/N): \"+Fore.RESET)\r\n if opcion_valida.upper() in [\"S\"]:\r\n self.registra()\r\n else:\r\n pass",
"def delete_lega(self, event):\n self.Disable()\n ViewDeleteLega(parent=self, title='Delete Lega')",
"def desactiver(self):\n self.est_activee = False",
"def removeElement(self):",
"def elimina(self):\n\tindex =self.ui.grilla.currentIndex()\n\tdata = self.ui.grilla.model()\n\tpeli = data.item(index.row(),0).peli\n\tiD = str(peli['id'])\n\tresp = QtGui.QMessageBox.question(self, \"Pregunta\",\"Desea realmente eliminar la pelicula seleccionada?\",QtGui.QMessageBox.Ok,QtGui.QMessageBox.No)\n if resp == QtGui.QMessageBox.Ok:\n Modelo_pelicula.borrar(iD);\n self.cargar_peliculas();",
"def pedir_etiqueta_a_eliminar():\r\n #Pedir el nombre de la etiqueta\r\n diccionario = guardar_en_diccionarios.guardar_en_diccionarios()\r\n etiqueta = input(\"Ingrese el nombre de la nota que desea eliminar: \")\r\n if etiqueta == \"\":\r\n etiqueta = \"Sin etiqueta\"\r\n etiqueta = validar_etiqueta(etiqueta)\r\n #Mostrarle al usuario las opciones a borrar\r\n for i,elem in enumerate(diccionario[etiqueta]):\r\n print(\"{}-{}\".format(i+1,elem))\r\n cantidad = len(diccionario[etiqueta])\r\n #Preguntar qué opciones quiere borrar e imprimirla\r\n texto_a_borrar = input(\"Ingrese el número de la opción que desea borrar: \")\r\n texto_a_borrar = validar_numero(texto_a_borrar,cantidad)\r\n print(\"[{}]\".format(etiqueta))\r\n print(\"-{}\".format(diccionario[etiqueta][int(texto_a_borrar)-1]))\r\n eliminar = input(\"Eliminar [s/n]: \")\r\n #Llamar función para que elimine el valor\r\n eliminar_etiqueta(etiqueta,texto_a_borrar,eliminar)",
"def delete(self, conn: Conexao, comandoPersonalizado: ComandoPersonalizado):",
"def delete_ride():",
"def remove_piece(s, (x,y)):\n\t\ts.matrix[x][y].occupant = None",
"def debe_eliminarse(self, item):\n imagen_item = conf.PAREJAS_X_ITEMS[self.nombre_imagen_grande]\n return item.nombre_imagen in (imagen_item, conf.PISTOLA)",
"def remove_entry(self):\n print('Removes fstab entry ')",
"def destructionDuVaisseau(self):\n canva.delete(self.vaisseaux)\n self.vivant=False\n return 'perdu'",
"def delete_button(self):\n self.physics_object.clear_forces()\n self.window.physics_canvas.delete_physics_object(self.physics_object)\n self.del_win()",
"def delete(self):\n self.set_key(\"\")\n self.set_current(0, 0)\n #self.keys_list.getContext().getControl(\"btnAssign\").setEnable(True)",
"def remove(self):\n print(\"Remove tab\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a queryset of regpersons and a search string; returns a filtered queryset with the filters applied to the core_id fields.
|
def search_core_ids(regpersons_queryset, search_string, as_of_date=None):
core_id_fields = ['national_id', 'birth_reg_id', 'workforce_id',
'beneficiary_id']
search_strings = tokenize_search_string(search_string)
q_filter = Q()
for search_string in search_strings:
for field in core_id_fields:
q_filter |= Q(**{"%s__icontains" % field: search_string})
results = regpersons_queryset.filter(q_filter)
results = as_of_date_filter(results, as_of_date=None)
# redundant just for documentation
return results
|
[
"def filter_persons(query, *, queryset=None):\n if queryset is None:\n queryset = models.Person.objects.all()\n\n if query:\n for token in query.strip().split(' '):\n queryset = queryset.filter(\n Q(last_name__icontains=token)\n | Q(first_name__icontains=token)\n | Q(middle_name__icontains=token))\n\n # Leave only full string matches\n normalized_query = _normalize(query)\n if ' ' in normalized_query:\n # TODO(artemtab): optimize this further. how?\n ids = [person.pk for person in queryset if\n normalized_query in _normalize(person.full_name)]\n queryset = models.Person.objects.filter(pk__in=ids)\n\n return queryset",
"def direct_field_search(queryset, field_names, search_string, as_of_date=None):\r\n # Split the string in case of first name, surname e.t.c\r\n search_strings = tokenize_search_string(search_string)\r\n q_filter = Q()\r\n for search_string in search_strings:\r\n for field in field_names:\r\n q_filter |= Q(**{\"%s__icontains\" % field: search_string})\r\n results = queryset.filter(q_filter)\r\n\r\n # results = as_of_date_filter(results, as_of_date=None)\r\n # redundant just for documentation\r\n # filter already applied on regpersons\r\n return results",
"def buscar(criterio):\n if criterio:\n p_criterio = criterio.split(\" \")\n qset = Q()\n for i in p_criterio:\n qset = qset & (\n Q(primer_apellido__icontains=i) | Q(segundo_apellido__icontains=i) | Q(\n primer_nombre__icontains=i) | Q(\n segundo_nombre__icontains=i) | Q(numero_documento__icontains=i))\n return Persona.objects.filter(qset).distinct()",
"def search(self):\n user = self.get_search_phrase()\n if self.requester.is_staff:\n res = TempCharge.objects.for_reseller(self.reseller).filter(user__first_name__icontains=user)\n if user:\n res = res.filter(user__first_name__icontains=user)\n else:\n res = TempCharge.objects.own(self.req)\n if user:\n res = res.filter(charger__first_name__icontains=user)\n return res",
"def get_queryset(self): # noqa: C901\n succinct = self.request.query_params.get('succinct')\n\n if succinct:\n queryset = Lease.objects.succinct_select_related_and_prefetch_related()\n else:\n queryset = Lease.objects.full_select_related_and_prefetch_related()\n\n if self.action != 'list':\n return queryset\n\n # Simple search\n identifier = self.request.query_params.get('identifier')\n search = self.request.query_params.get('search')\n\n if identifier is not None or search is not None:\n if search is None:\n search_string = identifier\n search_by_other = False\n else:\n search_string = search\n search_by_other = True\n\n looks_like_identifier = bool(re.match(r'[A-Z]\\d{4}-\\d+$', search_string.strip(), re.IGNORECASE))\n\n # Search by identifier or parts of it\n if len(search_string) < 3:\n identifier_q = Q(identifier__type__identifier__istartswith=search_string)\n elif len(search_string) == 3:\n identifier_q = Q(identifier__type__identifier__iexact=search_string[:2],\n identifier__municipality__identifier=search_string[2:3])\n elif len(search_string) < 7:\n district_identifier = search_string[3:5]\n if district_identifier == '0':\n identifier_q = Q(identifier__type__identifier__iexact=search_string[:2],\n identifier__municipality__identifier=search_string[2:3],\n identifier__district__identifier__in=range(0, 10))\n else:\n if district_identifier == '00':\n district_identifier = '0'\n else:\n district_identifier = district_identifier.lstrip('0')\n\n identifier_q = Q(identifier__type__identifier__iexact=search_string[:2],\n identifier__municipality__identifier=search_string[2:3],\n identifier__district__identifier__startswith=district_identifier)\n elif looks_like_identifier:\n district_identifier = search_string[3:5]\n if district_identifier == '00':\n district_identifier = '0'\n else:\n district_identifier = district_identifier.lstrip('0')\n\n identifier_q = Q(identifier__type__identifier__iexact=search_string[:2],\n identifier__municipality__identifier=search_string[2:3],\n identifier__district__identifier=district_identifier,\n identifier__sequence__startswith=search_string[6:])\n else:\n identifier_q = Q()\n\n other_q = Q()\n\n # Search also by other fields if the search string is clearly not a lease identifier\n if search_by_other and not looks_like_identifier:\n # Address\n other_q |= Q(lease_areas__addresses__address__icontains=search_string)\n\n # Property identifier\n other_q |= Q(lease_areas__identifier__icontains=search_string)\n normalized_identifier = normalize_property_identifier(search_string)\n if search_string != normalized_identifier:\n other_q |= Q(lease_areas__identifier__icontains=normalized_identifier)\n\n # Tenantcontact name\n other_q |= Q(tenants__tenantcontact__contact__name__icontains=search_string)\n\n if ' ' in search_string:\n tenant_name_parts = search_string.split(' ', 2)\n other_q |= (\n Q(tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[0]) &\n Q(tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[1])\n )\n other_q |= (\n Q(tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[1]) &\n Q(tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[0])\n )\n else:\n other_q |= Q(tenants__tenantcontact__contact__first_name__icontains=search_string)\n other_q |= Q(tenants__tenantcontact__contact__last_name__icontains=search_string)\n\n # Lessor\n other_q |= Q(lessor__name__icontains=search_string)\n other_q |= Q(lessor__first_name__icontains=search_string)\n other_q |= 
Q(lessor__last_name__icontains=search_string)\n\n # Date\n try:\n search_date = parse(search_string, parserinfo=parserinfo(dayfirst=True))\n if search_date:\n other_q |= Q(start_date=search_date.date())\n other_q |= Q(end_date=search_date.date())\n except ValueError:\n pass\n\n queryset = queryset.filter(identifier_q | other_q)\n\n # Advanced search\n search_form = LeaseSearchForm(self.request.query_params)\n\n if search_form.is_valid():\n if search_form.cleaned_data.get('tenant_name'):\n tenant_name = search_form.cleaned_data.get('tenant_name')\n\n # Tenantcontact name\n q = Q(tenants__tenantcontact__contact__name__icontains=tenant_name)\n\n if ' ' in tenant_name:\n tenant_name_parts = tenant_name.split(' ', 2)\n q |= (\n Q(tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[0]) &\n Q(tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[1])\n )\n q |= (\n Q(tenants__tenantcontact__contact__first_name__icontains=tenant_name_parts[1]) &\n Q(tenants__tenantcontact__contact__last_name__icontains=tenant_name_parts[0])\n )\n else:\n q |= Q(tenants__tenantcontact__contact__first_name__icontains=tenant_name)\n q |= Q(tenants__tenantcontact__contact__last_name__icontains=tenant_name)\n\n if search_form.cleaned_data.get('tenantcontact_type'):\n q &= Q(tenants__tenantcontact__type__in=search_form.cleaned_data.get(\n 'tenantcontact_type'))\n\n if search_form.cleaned_data.get('only_past_tenants'):\n q &= Q(tenants__tenantcontact__end_date__lte=datetime.date.today())\n\n if search_form.cleaned_data.get('tenant_activity'):\n if search_form.cleaned_data.get('tenant_activity') == 'past':\n q &= Q(tenants__tenantcontact__end_date__lte=datetime.date.today())\n\n if search_form.cleaned_data.get('tenant_activity') == 'active':\n # No need to filter by start date because future start dates are also considered active\n q &= (\n Q(tenants__tenantcontact__end_date=None) |\n Q(tenants__tenantcontact__end_date__gte=datetime.date.today())\n )\n\n queryset = queryset.filter(q)\n\n if search_form.cleaned_data.get('sequence'):\n queryset = queryset.filter(identifier__sequence=search_form.cleaned_data.get('sequence'))\n\n if search_form.cleaned_data.get('lease_start_date_start'):\n queryset = queryset.filter(start_date__gte=search_form.cleaned_data.get('lease_start_date_start'))\n\n if search_form.cleaned_data.get('lease_start_date_end'):\n queryset = queryset.filter(start_date__lte=search_form.cleaned_data.get('lease_start_date_end'))\n\n if search_form.cleaned_data.get('lease_end_date_start'):\n queryset = queryset.filter(end_date__gte=search_form.cleaned_data.get('lease_end_date_start'))\n\n if search_form.cleaned_data.get('lease_end_date_end'):\n queryset = queryset.filter(end_date__lte=search_form.cleaned_data.get('lease_end_date_end'))\n\n # Filter by active / expired only when only one of the options is set\n if bool(search_form.cleaned_data.get('only_active_leases')) ^ bool(\n search_form.cleaned_data.get('only_expired_leases')):\n if search_form.cleaned_data.get('only_active_leases'):\n # No need to filter by start date because future start dates are also considered active\n queryset = queryset.filter(Q(end_date__isnull=True) | Q(end_date__gte=datetime.date.today()))\n\n if search_form.cleaned_data.get('only_expired_leases'):\n queryset = queryset.filter(end_date__lte=datetime.date.today())\n\n if 'has_geometry' in search_form.cleaned_data:\n if search_form.cleaned_data.get('has_geometry') is True:\n queryset = queryset.filter(lease_areas__geometry__isnull=False)\n\n if 
search_form.cleaned_data.get('has_geometry') is False:\n queryset = queryset.filter(lease_areas__geometry__isnull=True)\n\n if search_form.cleaned_data.get('property_identifier'):\n property_identifier = search_form.cleaned_data.get('property_identifier')\n normalized_identifier = normalize_property_identifier(property_identifier)\n\n queryset = queryset.filter(\n Q(lease_areas__identifier__icontains=property_identifier) | Q(\n lease_areas__identifier__icontains=normalized_identifier)\n )\n\n if search_form.cleaned_data.get('address'):\n queryset = queryset.filter(\n lease_areas__addresses__address__icontains=search_form.cleaned_data.get('address'))\n\n if search_form.cleaned_data.get('lease_state'):\n queryset = queryset.filter(state__in=search_form.cleaned_data.get('lease_state'))\n\n if search_form.cleaned_data.get('business_id'):\n queryset = queryset.filter(\n tenants__tenantcontact__contact__business_id__icontains=search_form.cleaned_data.get('business_id'))\n\n if search_form.cleaned_data.get('national_identification_number'):\n nat_id = search_form.cleaned_data.get('national_identification_number')\n queryset = queryset.filter(\n tenants__tenantcontact__contact__national_identification_number__icontains=nat_id)\n\n if search_form.cleaned_data.get('lessor'):\n queryset = queryset.filter(lessor=search_form.cleaned_data.get('lessor'))\n\n if search_form.cleaned_data.get('contract_number'):\n queryset = queryset.filter(contracts__contract_number__icontains=search_form.cleaned_data.get(\n 'contract_number'))\n\n if search_form.cleaned_data.get('decision_maker'):\n queryset = queryset.filter(decisions__decision_maker=search_form.cleaned_data.get(\n 'decision_maker'))\n\n if search_form.cleaned_data.get('decision_date'):\n queryset = queryset.filter(decisions__decision_date=search_form.cleaned_data.get(\n 'decision_date'))\n\n if search_form.cleaned_data.get('decision_section'):\n queryset = queryset.filter(decisions__section=search_form.cleaned_data.get(\n 'decision_section'))\n\n if search_form.cleaned_data.get('reference_number'):\n reference_number = search_form.cleaned_data.get('reference_number')\n queryset = queryset.filter(Q(reference_number__icontains=reference_number) | Q(\n decisions__reference_number__icontains=reference_number))\n\n if search_form.cleaned_data.get('invoice_number'):\n queryset = queryset.filter(\n invoices__number__icontains=search_form.cleaned_data.get('invoice_number'))\n\n return queryset.distinct()",
"def _filter_commaseparated_field(self, field, values, queryset):\n field_query = \"%s__icontains\" % field\n filters = map(lambda v: Q(**{field_query: v}), values)\n filters = reduce(operator.or_, filters, Q())\n return queryset.filter(filters)",
"def _filter_by_multiple_fields(self, queryset, fields, value):\n\n query = Q()\n for field in fields:\n query |= Q(**{field + \"__icontains\": value})\n\n return queryset.filter(query).distinct()",
"def search_with_filtering():\n\n print \"SEARCH WITH FILTERING\"\n\n studp20 = [{'field':\"long_name\", 'comp':\"icontains\", 'value':\"20 Studiepoeng\"}]\n studp10 = [{'field':\"long_name\", 'comp':\"icontains\", 'value':\"10 Studiepoeng\"}]\n\n print \"Filter long_name contains 20 studiepoeng\"\n for subject in SimplifiedSubject.search(logincookie, orderby=['-short_name'],\n filters=studp20)['items']:\n print ' ', subject['short_name'], ':', subject['long_name']\n\n print \"Filter long_name contains 10 studiepoeng\"\n for subject in SimplifiedSubject.search(logincookie, orderby=['-short_name'],\n filters=studp10)['items']:\n print ' ', subject['short_name'], ':', subject['long_name']",
"def search(query, model):\n query = query.strip()\n LOGGER.debug(query)\n sqs = SearchQuerySet()\n results = sqs.raw_search(\"{}*\".format(query)).models(model)\n if not results:\n results = sqs.raw_search(\"*{}\".format(query)).models(model)\n if not results:\n results = sqs.raw_search(\"*{}*\".format(query)).models(model)\n\n return [o.pk for o in results]",
"def search_for_people(string):\n list = Member.objects.filter(Q(user__first_name__icontains=string)\n | Q(user__last_name__icontains=string))\n return create_dictionary(list)",
"def search(request):\n term = request.GET.get('term', '')\n if not term:\n raise Http404\n\n workgroups = Workgroup.objects.filter(\n make_megafilter(Workgroup, term))[:15]\n workgroups = [{\n 'label': str(workgroup),\n 'pk': workgroup.id} for workgroup in workgroups]\n return HttpResponse(json.dumps(workgroups))",
"def search_obj(request):\n obj_type = request.GET.get('obj_type', '')\n term = request.GET.get('term', '')\n if not (obj_type and term):\n raise Http404\n\n Klass, FormKlass = get_klasses(obj_type)\n\n records = Klass.objects.filter(make_megafilter(Klass, term))[:15]\n records = [{'label': str(record), 'pk': record.pk} for record in records]\n\n return HttpResponse(json.dumps(records))",
"def get_persons(institution_id=None, production_id=None):\n args, conditions = [], []\n append_condition(institution_id, 'institutionId = ?', args, conditions)\n # __append_condition__(production_id, 'productionId = ?', args, conditions)\n if production_id is not None:\n print('Filtering persons by production id is not implemented yet.')\n # TODO: Write another query that filters by participation in productions.\n else:\n pass # TODO: Push the normal query under here.\n dbc.execute('''SELECT * FROM Persons ''' + SQLPrepare.where_and(conditions) + ';', args)\n return convert_query(dbc, ['id', 'fName', 'lName', 'institutionId', 'deleted', 'lastUpdated', 'icon'])",
"def filter_companies(request):\n #\n # ~Q matches everything\n #\n q = Q()\n\n if request.GET.get('tld', None):\n q = (q & Q(tld=request.GET['tld']))\n\n if request.GET.get('size', None):\n q = (q & Q(empcnt=request.GET['size']))\n\n if request.GET.get('name', None):\n q = (q & Q(name__icontains=request.GET['name']))\n\n if request.GET.get('vacation', None):\n days = int(request.GET['vacation'])\n q = (q & Q(vacationaccrual__year__exact=1) & \\\n Q(vacationaccrual__days__gte=days) & \\\n Q(vacationaccrual__days__lt=days + 5))\n\n query_set = Company.objects.filter(q)\n\n #\n # The following filters are complex because we have to match on multiple values. So \n # if we're filtering on tags #highered and #education that Count(companytag) should\n # be equal to 2.\n #\n # Reference: \n # http://stackoverflow.com/questions/10067171/check-for-multiple-values-in-a-m2m-relationship-in-django\n #\n if request.GET.get('tags', None):\n tags = request.GET.get('tags').split()\n for tag in tags:\n query_set = query_set.filter(companytag__tag__name__in=[tag])\n\n if request.GET.get('awards', None):\n awards = request.GET.get('awards').split()\n for award in awards:\n query_set = query_set.filter(companyaward__award__id__in=[award]).distinct()\n\n return query_set",
"def search_obj(request, get_klasses_fn):\n obj_type = request.GET.get('obj_type', '')\n term = request.GET.get('term', '')\n if not (obj_type and term):\n raise Http404\n\n Klass, FormKlass, FQDNFormKlass = get_klasses_fn(obj_type)\n\n records = Klass.objects.filter(make_megafilter(Klass, term))[:15]\n records = [{'label': str(record), 'pk': record.pk} for record in records]\n\n return HttpResponse(json.dumps(records))",
"def apply_search(self, queryset):\n description_ctx = {'title_filter': ''}\n\n self.form = self.form_class(self.request.GET)\n\n if not self.form.is_valid():\n self.description = self.description_template % description_ctx\n return queryset\n\n data = self.form.cleaned_data\n\n if data.get('name'):\n queryset = queryset.filter(\n name__icontains=data['name']).distinct()\n description_ctx['name_filter'] = _(\n \" including an item with title matching '%s'\") % data['name']\n\n self.description = self.description_template % description_ctx\n\n return queryset",
"def filter_by_search_fields(self, search_fields, operator='and'):\n where_clause, parameters = self._build_where_clause(search_fields, operator=operator)\n sql = f'select * from {self.get_table()} where {where_clause}'\n cursor = self._execute(sql, parameters=tuple(parameters))\n items = [self.get_dataclass()(**row) for row in cursor]\n return items",
"def filter_locations(self, queryset, name, values):\n locations = values.split(',')\n return queryset.filter(locations__short_name__in=locations).distinct()",
"def do_field_filtering(self, request, queryset):\n fields = set(self.get_api_fields(queryset.model)).union({'id'})\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n field = getattr(queryset.model, field_name, None)\n\n if isinstance(field, _TaggableManager):\n for tag in value.split(','):\n queryset = queryset.filter(**{field_name + '__name': tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a queryset and a list of field names that the search string can act on.
|
def direct_field_search(queryset, field_names, search_string, as_of_date=None):
# Split the string in case of first name, surname e.t.c
search_strings = tokenize_search_string(search_string)
q_filter = Q()
for search_string in search_strings:
for field in field_names:
q_filter |= Q(**{"%s__icontains" % field: search_string})
results = queryset.filter(q_filter)
# results = as_of_date_filter(results, as_of_date=None)
# redundant just for documentation
# filter already applied on regpersons
return results
|
[
"def _filter_by_multiple_fields(self, queryset, fields, value):\n\n query = Q()\n for field in fields:\n query |= Q(**{field + \"__icontains\": value})\n\n return queryset.filter(query).distinct()",
"def filter_in_string(queryset: QuerySet, field_name: str, values: list):\n if not values:\n return queryset\n # We check both content and length in order to return only exact matches\n icontains = f\"{field_name}__icontains\"\n condition = reduce(operator.or_, [Q(**{icontains: value}) for value in values])\n return queryset.filter(condition).all()",
"def _filter_commaseparated_field(self, field, values, queryset):\n field_query = \"%s__icontains\" % field\n filters = map(lambda v: Q(**{field_query: v}), values)\n filters = reduce(operator.or_, filters, Q())\n return queryset.filter(filters)",
"def do_field_filtering(self, request, queryset):\n fields = set(self.get_api_fields(queryset.model)).union({'id'})\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n field = getattr(queryset.model, field_name, None)\n\n if isinstance(field, _TaggableManager):\n for tag in value.split(','):\n queryset = queryset.filter(**{field_name + '__name': tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset",
"def select_fields(self, bundle, queryset):\n if hasattr(bundle.request, 'GET'):\n selectedFields = bundle.request.GET.get('fields')\n # If selectedFields has data turn it into a list\n if selectedFields:\n selectedFields = selectedFields.split(',')\n # only() to select only the requested fields\n # select_related() to generate SQL joins on OneToMany relations and call just one query\n # Django will take care of incorrect args (i.e. passing a non-relation field to select_related())\n queryset = queryset.select_related(*selectedFields).only(*selectedFields)\n return queryset",
"def apply_search(self, queryset):\n description_ctx = {'title_filter': ''}\n\n self.form = self.form_class(self.request.GET)\n\n if not self.form.is_valid():\n self.description = self.description_template % description_ctx\n return queryset\n\n data = self.form.cleaned_data\n\n if data.get('name'):\n queryset = queryset.filter(\n name__icontains=data['name']).distinct()\n description_ctx['name_filter'] = _(\n \" including an item with title matching '%s'\") % data['name']\n\n self.description = self.description_template % description_ctx\n\n return queryset",
"def list_search_fields(self):",
"def filter_by_search_fields(self, search_fields, operator='and'):\n where_clause, parameters = self._build_where_clause(search_fields, operator=operator)\n sql = f'select * from {self.get_table()} where {where_clause}'\n cursor = self._execute(sql, parameters=tuple(parameters))\n items = [self.get_dataclass()(**row) for row in cursor]\n return items",
"def search_fields(self, fields, page_start=1, page_end=None, number_results=None, order_by=[], **args):\r\n try:\r\n logger.debug('CommonDAO.searchFields :: pageStart: %s pageEnd: %s' % (page_start, page_end) )\r\n logger.debug('CommonDAO.searchFields :: numberResults: %s disablePaging: %s' % (number_results, args['disable_paging']) )\r\n if (args.has_key('disablePaging') and not args['disablePaging']) or not args.has_key('disablePaging'):\r\n iStart = (page_start-1)*number_results\r\n if page_end is None:\r\n iEnd = iStart+number_results\r\n else:\r\n iEnd = iStart + number_results*(page_end-page_start+1)\r\n logger.debug('CommonDAO.searchFields :: iStart: %s iEnd: %s' % (iStart, iEnd) )\r\n dbObj = self._processRelated()\r\n \"\"\"if len(orderBy) != 0:\r\n dbObj = self.model.objects.order_by(*orderBy)\"\"\"\r\n logger.debug( self._resolveDbName() )\r\n logger.debug('CommonDAO.searchFields :: args: %s' % (args) )\r\n if (args.has_key('disablePaging') and not args['disablePaging']) or not args.has_key('disablePaging'):\r\n logger.debug('CommonDAO.searchField:: iStart: %s iEnd: %s' % (iStart, iEnd) )\r\n if args.has_key('disable_paging'):\r\n del args['disable_paging']\r\n if len(order_by) == 0:\r\n xpList = dbObj.using(self._resolveDbName()).filter(**args)[iStart:iEnd].values_list(*fields)\r\n else:\r\n xpList = dbObj.using(self._resolveDbName()).filter(**args).order_by(*order_by)[iStart:iEnd].values_list(*fields)\r\n else:\r\n logger.debug('CommonDAO.searchField:: Have no paging, we get all the data...')\r\n if args.has_key('disable_paging'):\r\n del args['disable_paging'] \r\n if len(order_by) == 0:\r\n xpList = dbObj.using(self._resolveDbName()).filter(**args).values_list(*fields)\r\n else:\r\n xpList = dbObj.using(self._resolveDbName()).filter(**args).order_by(*order_by).values_list(*fields)\r\n \"\"\"if len(orderBy) != 0:\r\n xpList.orderBy(*orderBy)\"\"\"\r\n return xpList\r\n except Exception as e:\r\n raise XpMsgException(e, _('Error in searching fields in model ') + str(self.model), origin='data')",
"def apply_spread_search_filter(active_options, query_list):\n \n option_name = 'search'\n if option_name in active_options:\n search_term = active_options[option_name]\n \n # Since this is a search filter, any value is valid except for a blank.\n if len(search_term) > 0:\n \n # OR together the Q objects for each field we want to search\n search_query = [Q(title__icontains=search_term) |\n Q(description__icontains=search_term)]\n \n # Append this query to the list so that the overal query will be AND'd together.\n query_list += search_query",
"def filter_fields(\n field_names: Iterable[str],\n include: Iterable[str] = None,\n exclude: Iterable[str] = None,\n readonly: Iterable[str] = None,\n) -> Tuple[Set[str], Set[str]]:\n field_names = set(field_names)\n\n include = set(include or EMPTY)\n if include:\n field_names.intersection_update(include)\n\n exclude = set(exclude or EMPTY)\n if exclude:\n field_names.difference_update(exclude)\n\n readonly = set(readonly or EMPTY)\n if readonly:\n readonly.intersection_update(field_names)\n\n return field_names, readonly",
"def apply_card_search_filter(active_options, query_list):\n \n option_name = 'search'\n if option_name in active_options:\n search_term = active_options[option_name]\n \n # Any value is valid except nothing\n if len(search_term) > 0:\n # OR together Q objects for all the fields to search, since a match on any of them is ok.\n search_query = [Q(title__icontains=search_term) |\n Q(caption__icontains=search_term) |\n Q(description__icontains=search_term)]\n \n query_list += search_query",
"def check_search_fields(meta, fields):\n\t\tif not meta.search_fields:\n\t\t\treturn\n\n\t\t# No value fields should not be included in search field\n\t\tsearch_fields = [field.strip() for field in (meta.search_fields or \"\").split(\",\")]\n\t\tfieldtype_mapper = { field.fieldname: field.fieldtype \\\n\t\t\tfor field in filter(lambda field: field.fieldname in search_fields, fields) }\n\n\t\tfor fieldname in search_fields:\n\t\t\tfieldname = fieldname.strip()\n\t\t\tif (fieldtype_mapper.get(fieldname) in no_value_fields) or \\\n\t\t\t\t(fieldname not in fieldname_list):\n\t\t\t\tfrappe.throw(_(\"Search field {0} is not valid\").format(fieldname))",
"def filter_queryset(self, value, queryset, field_name):\n filter_kwargs = {\n \"%s__%s\" % (self.lookup_field or field_name, self.lookup): value\n }\n return qs_filter(queryset, **filter_kwargs)",
"def filter_fields(included_fields, excluded_fields, model_name, fields):\r\n # Sanity check to make sure contradictory parameters\r\n if included_fields is not None and excluded_fields is not None:\r\n raise ValueError('included AND excluded fields specified')\r\n # Here we filter out all the field not related to this particular model\r\n included_fields = [name.split('.')[-1] for name in included_fields if name.startswith(model_name + '.')] if included_fields else None\r\n excluded_fields = [name.split('.')[-1] for name in excluded_fields if name.startswith(model_name + '.')] if excluded_fields else None\r\n # Then we filter according to the directives.\r\n if included_fields is not None:\r\n fields = filter(lambda field: field.name in included_fields, fields)\r\n elif excluded_fields is not None:\r\n fields = filter(lambda field: field.name not in excluded_fields, fields)\r\n return fields",
"def set_must_match_in_list(self, field: str, values: list) -> None:\n terms = {\n \"terms\": {\n field: values\n }\n }\n self.query[\"query\"][\"bool\"][\"filter\"].append(terms)",
"def build_autocomplete_view_with_queryset(queryset):\n class PersonAutocompleteViewset(viewsets.ModelViewSet):\n http_method_names = [\"get\"]\n permission_classes = (permissions.IsAuthenticated, )\n serializer_class = serializers.ThesesPersonSerializer\n pagination_class = PersonAutocompletePagination\n\n def get_queryset(self):\n qs = queryset.objects.select_related(\n \"user\"\n ).annotate(\n _full_name=Concat(\n \"user__first_name\", Value(\" \"), \"user__last_name\"\n )\n ).order_by(\"_full_name\")\n name_filter = self.request.query_params.get(\"filter\", \"\").strip()\n if name_filter:\n qs = qs.filter(_full_name__icontains=name_filter)\n return qs.all()\n return PersonAutocompleteViewset",
"def expando_filter(model_qs, **kwargs):\n for k,v in kwargs.items():\n try:\n k, lookup = k.split('__', 1)\n lookup = '__' + lookup\n except ValueError:\n lookup = ''\n\n kw = { 'key': k, 'value' + lookup: v }\n pks_ = Expando.objects.filter(**kw).values_list('object_pk', flat=True)\n try:\n pks = pks & set(pks_)\n except UnboundLocalError:\n pks = set(pks_)\n\n return model_qs.filter(pk__in=pks)",
"def invoke_search_plugins(data_dict, field_types):\n query_dict = {\n 'select': [],\n 'sort': [],\n 'where': []\n }\n for plugin in plugins.PluginImplementations(interfaces.IDatastore):\n query_dict = plugin.datastore_search(\n {}, data_dict, field_types, query_dict\n )\n clauses = []\n values = []\n for clause_and_values in query_dict['where']:\n clauses.append('(' + clause_and_values[0] + ')')\n values += clause_and_values[1:]\n\n where_clause = u' AND '.join(clauses)\n if where_clause:\n where_clause = u'WHERE ' + where_clause\n\n if 'ts_query' in query_dict and query_dict['ts_query']:\n ts_query = query_dict['ts_query']\n else:\n ts_query = ''\n\n return ts_query, where_clause, values"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Render the main page with a list of experiment runs.
|
def show_runs():
# return render_template("runs.html", runs=data.runs(), type=type)
return render_template("runs.html", runs=[], type=type)
|
[
"def experiment():\n return render_template('experiment.html', array = array)",
"def index():\r\n redis = _get_redis_connection()\r\n return render_template('split/index.html',\r\n experiments=Experiment.all(redis)\r\n )",
"def render_index(request):\n all_setups = logic.get_all_setups()\n all_punchlines = logic.get_all_punchlines()\n\n context = {\n 'setups': all_setups,\n 'punchlines': all_punchlines\n }\n return render(request, 'jokes/index.html', context)",
"def main():\n return render_template(\"index.html\", title=\"Intro Screen\", url=os.getenv(\"URL\"))",
"def main():\n\treturn render_template(\"plot.html\")",
"def view_game():\n\n return render_template(\"pages/index.html\")",
"def get_running_tests(request: HttpRequest) -> HttpResponse:\n context = {\n \"title\": \"Запущенные тесты\",\n }\n return render(request, \"main/lecturer/runningTests.html\", context)",
"def tests(payload):\n return(render_template('tests/jasmine.html', payload=payload))",
"def serve_main_page():\n return render_template('index.html')",
"def render_output_website():\n from jinja2 import Environment, PackageLoader\n env = Environment(loader=PackageLoader('looksee', 'templates'))\n connection = tasa.store.connection\n containers = {}\n uris = connection.hgetall('container_uris')\n for key, value in uris.items():\n containers[key] = {'uri': value}\n pipe = connection.pipeline()\n queue_names = ['masscan', 'masscan_out', 'rfb_print']\n for name in queue_names:\n pipe.llen(name)\n queue_lengths = pipe.execute()\n queues = zip(queue_names, queue_lengths)\n for key in uris.keys():\n pipe.hgetall('container_' + key)\n for key, value in zip(uris.keys(), pipe.execute()):\n containers[key]['files'] = value\n template = env.get_template('container_index.html')\n rendered = template.render(containers=containers, queues=queues)\n with open('www/index.html', 'w') as f:\n f.write(rendered)\n template = env.get_template('inner_index.html')\n for key in containers.keys():\n rendered = template.render(base_uri=containers[key]['uri'],\n files = containers[key]['files'])\n with open('www/%s.html' % key, 'w') as f:\n f.write(rendered)",
"def homepage():\n standings_data = fetch_standings()\n return render_template('index.html',\n standings=standings_data['data'],\n gameweek=standings_data['gameweek'],\n status=standings_data['status'],\n gameweeks=standings_data['gameweek'])",
"def index():\n items = get_hotshots()\n return render_template('index.html', hotshots=items)",
"def project_overview(request, project_name):\n\n runs = []\n\n # Find all runs and how many errors for each of them\n for run in Run.objects.filter(project_name=project_name).order_by('date'):\n errors = 0\n\n for file in File.objects.filter(run=run).order_by('filename'):\n errors += len(RunError.objects.filter(file=file))\n\n runs.append({'run_obj': run, 'errors': errors})\n\n if not len(runs):\n raise Http404\n\n return render_to_response('project_overview.html', {'runs': runs})",
"def render():\n from flask import g\n\n less()\n jst()\n\n app_config_js()\n\n compiled_includes = []\n\n for rule in app.app.url_map.iter_rules():\n rule_string = rule.rule\n name = rule.endpoint\n\n if name == 'static' or name.startswith('_'):\n print 'Skipping %s' % name\n continue\n\n if rule_string.endswith('/'):\n filename = 'www' + rule_string + 'index.html'\n elif rule_string.endswith('.html'):\n filename = 'www' + rule_string\n else:\n print 'Skipping %s' % name\n continue\n\n dirname = os.path.dirname(filename)\n\n if not (os.path.exists(dirname)):\n os.makedirs(dirname)\n\n print 'Rendering %s' % (filename)\n\n with app.app.test_request_context(path=rule_string):\n g.compile_includes = True\n g.compiled_includes = compiled_includes\n\n view = app.__dict__[name]\n content = view()\n\n compiled_includes = g.compiled_includes\n\n with open(filename, 'w') as f:\n f.write(content.encode('utf-8'))",
"def render_index():\n return render_template('0-index.html')",
"def runtest():\n pwd = os.path.abspath(os.path.dirname(__file__))\n response = json.loads(request.body.read())\n testCases = (str(response['testCases'])).split(',')\n testCases.pop()\n _runner = (str(response['Runner']))\n _buildName = (str(response['buildName']))\n _userId = (str(response['userId']))\n _testPlanId = (str(response['testPlanId']))\n totalTestCases = len(testCases)\n if _runner == 'HTMLTestRunner':\n if totalTestCases == 0:\n return \"Select testcases to run..\"\n else:\n shutil.rmtree(pwd+'/Output/')\n os.mkdir(pwd+'/Output/')\n listOfTestSuiteNames = getTestSuiteNames(testCases)\n for testSuite in listOfTestSuiteNames:\n suite = unittest.TestSuite()\n for testCase in testCases:\n testSuiteName = ((str(testCase).split(' '))[0]).split('.')[-1]\n if testSuite == testSuiteName:\n _testSuiteName = ((str(testCase)).split(' ')[0])[1:]\n classObj = my_import(_testSuiteName)\n _testCaseName = ((((str(testCase)).split(' ')[1])[:-1]).split('='))[1]\n suite.addTest(classObj(_testCaseName))\n _testModuleName = testSuiteName#((str(testSuite).split(\".\")[-1])[0:-2]) \n _output = open(pwd+\"/Output/\"+_testModuleName+\".html\",\"w\")\n HTMLRunner = HTMLTestRunner.HTMLTestRunner(stream=_output,title=_testModuleName,description=\"Test case's for the module \"+_testModuleName)\n HTMLRunner.run(suite)\n subprocess.Popen(['python',pwd+\"/ExtLib/Statistics.py\",\"Test Automation\",pwd+\"/Output/\"])\n IndexMaker = HTMLIndexCreator.HTMLIndexCreator(pwd+\"/Output/\")\n IndexMaker.makeHTMLIndexFile() \n return \"Test completed.....\"\n else:\n return \"The specified runner does not exist.\"",
"def render_game_page():\n\n return render_template('tanks.html')",
"def index(request):\n\treturn render(request, \"learning_logs/index.html\")",
"def lecturer_all_tests_results(request: HttpRequest) -> HttpResponse:\n context = {\n \"title\": \"Результаты тестирований\",\n \"subjects\": Subject.objects.all(),\n \"lecturers\": User.objects.filter(groups__name=\"lecturer\"),\n }\n return render(request, \"main/lecturer/testsResults.html\", context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Launch TensorBoard for a given run ID and log ID of that run.
|
def run_tensorboard(run_id, tflog_id):
data = current_app.config["data"]
    # optimistically suppose the run exists...
run = data.get_run(run_id)
base_dir = Path(run["experiment"]["base_dir"])
log_dir = Path(run["info"]["tensorflow"]["logdirs"][tflog_id])
# TODO ugly!!!
if log_dir.is_absolute():
path_to_log_dir = log_dir
else:
path_to_log_dir = base_dir.joinpath(log_dir)
path_to_log_dir = str(path_to_log_dir)
if not os.path.exists(path_to_log_dir):
# This run was not on this machine
path_to_log_dir = '/tmp/sacredboard'
# Add a subfolder for this run id
runfolder = path_to_log_dir + '/{}'.format(run_id)
if not os.path.exists(runfolder):
os.makedirs(runfolder)
# copy all artifacts there
for artifact in run['artifacts']:
file = data.get_artifact(artifact['file_id'])
basename = file.filename.split('/')[-1]
with open(os.path.join(runfolder, basename), 'wb') as f:
f.write(file.read())
port = int(tensorboard.run_tensorboard(str(path_to_log_dir), port=int(current_app.config["port"])+1))
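    # reuse the host part of the incoming request URL so the redirect points at the same machine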
url_root = request.url_root
url_parts = re.search("://([^:/]+)", url_root)
redirect_to_address = url_parts.group(1)
return redirect("http://%s:%d" % (redirect_to_address, port))
|
[
"def create(\n cls,\n tensorboard_run_id: str,\n tensorboard_experiment_name: str,\n tensorboard_id: Optional[str] = None,\n display_name: Optional[str] = None,\n description: Optional[str] = None,\n labels: Optional[Dict[str, str]] = None,\n project: Optional[str] = None,\n location: Optional[str] = None,\n credentials: Optional[auth_credentials.Credentials] = None,\n request_metadata: Sequence[Tuple[str, str]] = (),\n create_request_timeout: Optional[float] = None,\n ) -> \"TensorboardRun\":\n if display_name:\n utils.validate_display_name(display_name)\n\n if labels:\n utils.validate_labels(labels)\n\n display_name = display_name or tensorboard_run_id\n\n api_client = cls._instantiate_client(location=location, credentials=credentials)\n\n parent = utils.full_resource_name(\n resource_name=tensorboard_experiment_name,\n resource_noun=TensorboardExperiment._resource_noun,\n parse_resource_name_method=TensorboardExperiment._parse_resource_name,\n format_resource_name_method=TensorboardExperiment._format_resource_name,\n parent_resource_name_fields={Tensorboard._resource_noun: tensorboard_id},\n project=project,\n location=location,\n )\n\n gapic_tensorboard_run = gca_tensorboard_run.TensorboardRun(\n display_name=display_name,\n description=description,\n labels=labels,\n )\n\n _LOGGER.log_create_with_lro(cls)\n\n tensorboard_run = api_client.create_tensorboard_run(\n parent=parent,\n tensorboard_run=gapic_tensorboard_run,\n tensorboard_run_id=tensorboard_run_id,\n metadata=request_metadata,\n timeout=create_request_timeout,\n )\n\n _LOGGER.log_create_complete(cls, tensorboard_run, \"tb_run\")\n\n return cls(\n tensorboard_run_name=tensorboard_run.name,\n credentials=credentials,\n )",
"async def launchbyid(self, ctx, *args):\n if not can_answer(ctx):\n return\n launchid = False\n for arg in args:\n if str(arg).isdigit():\n launchid = int(arg)\n if launchid:\n launch = launchlibrary.Launch.fetch(api, id=launchid)[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No ID provided.\"\n await send(ctx, msg, args)",
"def test_start_run_tensorboard(self):\n pass",
"def launch(self):\n # Make it easy to run TensorBoard inside other programs, e.g. Colab.\n server = self._make_server()\n thread = threading.Thread(target=server.serve_forever, name='GRTensorBoard')\n thread.daemon = True\n thread.start()\n return server.get_url()",
"def start(ctx, file): # pylint:disable=redefined-builtin\n specification = None\n job_content = None\n if file:\n specification = check_polyaxonfile(file, log=False).specification\n\n if specification:\n # pylint:disable=protected-access\n check_polyaxonfile_kind(specification=specification, kind=kinds.TENSORBOARD)\n job_content = specification.raw_data\n\n user, project_name = get_project_or_local(ctx.obj.get('project'))\n group = ctx.obj.get('group')\n experiment = ctx.obj.get('experiment')\n if experiment:\n try:\n response = PolyaxonClient().experiment.start_tensorboard(\n username=user,\n project_name=project_name,\n experiment_id=experiment,\n content=job_content,\n is_managed=True,\n )\n obj = 'experiment `{}`'.format(experiment)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not start tensorboard experiment `{}`.'.format(experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n elif group:\n try:\n response = PolyaxonClient().experiment_group.start_tensorboard(\n username=user,\n project_name=project_name,\n group_id=group,\n content=job_content,\n is_managed=True,\n )\n obj = 'group `{}`'.format(group)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not start tensorboard group `{}`.'.format(group))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n else:\n try:\n response = PolyaxonClient().project.start_tensorboard(\n username=user,\n project_name=project_name,\n content=job_content,\n is_managed=True,\n )\n obj = 'project `{}`'.format(project_name)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not start tensorboard project `{}`.'.format(project_name))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.status_code == 200:\n Printer.print_header(\"A tensorboard for this {} is already running on:\".format(obj))\n click.echo(get_tensorboard_url(user=user,\n project_name=project_name,\n experiment=experiment,\n group=group))\n sys.exit(0)\n\n if response.status_code != 201:\n Printer.print_error('Something went wrong, Tensorboard was not created.')\n sys.exit(1)\n\n Printer.print_success('Tensorboard is being deployed for {}'.format(obj))\n indentation.puts(\"It may take some time before you can access tensorboard.\\n\")\n indentation.puts(\"Your tensorboard will be available on:\\n\")\n with indentation.indent(4):\n indentation.puts(get_tensorboard_url(user, project_name, experiment, group))",
"def print_failure_log_for_run(host: str, run_id: str, namespace: str):\n client = kfp.Client(host=host)\n run = client.get_run(run_id=run_id)\n workflow_manifest = json.loads(run.pipeline_runtime.workflow_manifest)\n if kube_utils.PodPhase(\n workflow_manifest['status']['phase']) != kube_utils.PodPhase.FAILED:\n return\n\n k8s_client = kube_utils.make_core_v1_api()\n pods = [i for i in workflow_manifest['status']['nodes'] if i['type'] == 'Pod']\n for pod in pods:\n if kube_utils.PodPhase(pod['phase']) != kube_utils.PodPhase.FAILED:\n continue\n display_name = pod['displayName']\n pod_id = pod['id']\n\n log = k8s_client.read_namespaced_pod_log(\n pod_id, namespace=namespace, container='main')\n for line in log.splitlines():\n logging.info('%s:%s', display_name, line)",
"def run_task(id):\n task_args = args_from_id(id, task_args_ranges)\n logging.info('Running task using args %s', task_args)\n task_fun(task_args)",
"def load_board(self, _id: any = None):\r\n pass",
"def load_wandb_run(run_id,\n entity,\n project,\n wandb_api=None):\n if wandb_api is None:\n wandb_api = wandb.Api()\n\n # Path to Weights and Biases run\n wandb_path = f'{entity}/{project}/{run_id}'\n # Load the run\n run = wandb_api.run(wandb_path)\n return run",
"def get_workflow_run_by_id(self, workflowid: str, workflowbuildid: str, workflowrunid: str, query_params: Dict[str, object] = None) -> WorkflowRun:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"workflowid\": workflowid,\n \"workflowbuildid\": workflowbuildid,\n \"workflowrunid\": workflowrunid,\n }\n\n path = Template(\"/catalog/v2alpha2/workflows/${workflowid}/builds/${workflowbuildid}/runs/${workflowrunid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, WorkflowRun)",
"def _log_in_tensorboard(self, steps: int, logs: List[LogData]):\n\n for log in logs:\n if log.type == \"scalar\":\n self._summary_writer.add_scalar(log.name, log.value, steps)\n if log.type == \"image\":\n self._summary_writer.add_image(log.name, log.value, steps)",
"def log_and_launch(self, config: Any, command: str = \"\"):\n # Extract params for this launch\n params = config.get(\"mlflow\") or {}\n launches = params.get(\"launches\") or []\n launches = [launches] if not isinstance(launches, list) else launches\n launch = launches[0] if launches else {}\n log_artifacts = launch.get(\"log_artifacts\", True)\n log_parameters = launch.get(\"log_parameters\", True)\n path_config = launch.get(\"path_config\", \"config.json\")\n path_command = launch.get(\"path_command\", \"launch.txt\")\n include_keys = launch.get(\"include_keys\")\n ignore_keys = launch.get(\"ignore_keys\")\n\n # Log artifacts\n if log_artifacts:\n LOGGER.info(f\"Logging artifacts {path_config} and {path_command}\")\n dir_artifacts = tempfile.mkdtemp()\n with Path(dir_artifacts, path_config).open(\"w\") as file:\n json.dump(config, file, indent=4)\n with Path(dir_artifacts, path_command).open(\"w\") as file:\n file.write(f\"fromconfig {path_config} - {command}\")\n mlflow.log_artifacts(local_dir=dir_artifacts)\n\n # Log parameters\n if log_parameters:\n LOGGER.info(\"Logging parameters\")\n params = get_params(config, ignore_keys, include_keys)\n for idx in range(0, len(params), 100):\n mlflow.log_params(dict(params[idx : idx + 100]))\n\n # Update config (remove used params if successive launches)\n launches = launches[1:] if launches else []\n launches = launches if launches else [{}]\n launches[0] = fromconfig.utils.merge_dict({\"log_artifacts\": False, \"log_parameters\": False}, launches[0])\n config = fromconfig.utils.merge_dict(config, {\"mlflow\": {\"launches\": launches}})\n self.launcher(config=config, command=command)",
"def start_run(model, version, experiment='default'):\n if model is None or model.strip() == '' or not re.match(LABEL_PATTERN, model):\n raise AssertionError('Please provide a valid name for your model.It can contain ' +\n 'lower-case alpha-numeric characters and dashes only.')\n\n if not re.match(LABEL_PATTERN, model):\n raise AssertionError('name is invalid. It can contain ' +\n 'lower-case alpha-numeric characters and dashes only.')\n\n if experiment is None:\n experiment = 'default'\n\n if experiment != 'default':\n if experiment.strip() == '' or not re.match(LABEL_PATTERN, experiment):\n raise AssertionError('experiment is invalid. It can contain ' +\n 'lower-case alpha-numeric characters and dashes only.')\n\n if version <= 0:\n raise AssertionError('version must be greater than zero')\n\n run_id = str(uuid4())\n\n if settings.state == 'local':\n tracking_session = TrackingSession(model, version, experiment, run_id, LocalState())\n elif settings.state == \"remote\":\n tracking_session = TrackingSession(model, version, experiment, run_id, RemoteState())\n\n return tracking_session",
"def subrun(self, subrunId, domList):\n if self.__runIface is None:\n if not self.__connectToDAQRun():\n return\n\n if len(domList) > 0:\n action = 'Starting'\n else:\n action = 'Stopping'\n self.__log.info('%s subrun %d.%d' %\n (action, self.__runNumber, subrunId))\n\n ret = self.__runIface.flasher(subrunId, domList)\n if ret != 1: return \"New subrun FAILED. See pDAQ logs for more info.\"\n return \"OK\"",
"def submit_run(self, json):\n response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)\n return response['run_id']",
"def run_train_model(self,model,X_train,y_train,X_test,y_test,model_path,logs_path,plots_path,activate_tensorboard=0,run_id=0,tl_type='full_fine_tune'):\t\t\t\n\t\timport tensorflow as tf\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom tensorflow.keras.models import load_model\n\t\tfrom tensorflow.keras.callbacks import ModelCheckpoint\n\t\tfrom tensorflow.keras.callbacks import TensorBoard\n\n\t\tmodel_file_path=model_path+'/trained_model_'+str(run_id)+'.h5'\n\t\t\n\t\t#X_train, X_test, y_train, y_test = train_test_split(X_in, Y_out, test_size = self.split_ratio)\n\t\tprint(\"Data Split Completed\")\n\t\t\n\t\t#Checkpointer to save the best model\n\t\tcheckpointer = tf.keras.callbacks.ModelCheckpoint(model_file_path, verbose=1, save_best_only=True,monitor='val_loss',save_weights_only=True)\n\t\t\n\t\tcallbacks=[checkpointer]\n\t\t\n\t\tif(activate_tensorboard==1):\n\t\t\t#Activating Tensorboard for Visualization\n\t\t\ttensorboard = TensorBoard(log_dir=logs_path,histogram_freq=1, write_graph=True, write_images=True)\n\t\t\tcallbacks=[checkpointer,tensorboard]\n\t\t\n\t\t#tensorboard = TensorBoard(log_dir=logs_path,histogram_freq=1, write_graph=True, write_images=True)\n\n\t\thistory=model.fit(x=X_train, y=y_train, validation_data=(X_test,y_test), epochs=self.epochs, batch_size=self.batch_size,callbacks=callbacks)\n\t\t\n\t\ttrainviz=TrainViz()\n\t\t#trainviz.training_plot(history,plots_path,run_id)\n\t\t\n\t\tif(tl_type=='variable_lr'):\n\t\t\tinference_model=load_model(model_file_path, custom_objects={'LRMultiplier': LRMultiplier})\n\t\telse:\n\t\t\tmodel.load_weights(model_file_path)\n\t\t\n\t\tprint('Compiling test metrics...')\n\t\ty_pred=model.predict(X_test)\n\n\t\tmetrics_eval=MetricsEval()\n\t\teval_metrics_reg,accuracy_metrics_df_reg=metrics_eval.metrics_eval_base(y_pred[0],y_test[0],logs_path)\n\t\teval_metrics_cla,accuracy_metrics_df_cla=metrics_eval.metrics_eval_classification(y_pred[1],y_test[1],logs_path)\n\n\t\treturn model,eval_metrics_reg,accuracy_metrics_df_reg,eval_metrics_cla,accuracy_metrics_df_cla",
"def test_start_run():\n api = 'runs/start/'\n\n data = {\n 'name': 'my test run',\n 'product': {\n 'name': 'TestCube',\n 'team': {\n 'name': 'ATeam'\n }\n },\n # optional, if provided will be a link in run page\n 'source': {\n 'name': 'Jenkins',\n 'link': 'http://jenkins/run'\n },\n # optional, run variables can be saved to reset purpose\n 'variables': dumps(dict(environ)),\n }\n\n response = requests.post(url=root + api,\n auth=auth,\n json=data)\n\n result = response.json()\n assert result['success'], response.text\n run = result['run']\n print(run)",
"def get_runs_by_id(self, config_id):\n\t\td = self.data[config_id]\n\n\t\truns = []\n\t\tfor b in d.results.keys():\n\t\t\ttry:\n\t\t\t\terr_logs = d.exceptions.get(b, None)\n\n\t\t\t\tif d.results[b] is None:\n\t\t\t\t\tr = Run(config_id, b, None, None , d.time_stamps[b], err_logs)\n\t\t\t\telse:\n\t\t\t\t\tr = Run(config_id, b, d.results[b]['loss'], d.results[b]['info'] , d.time_stamps[b], err_logs)\n\t\t\t\truns.append(r)\n\t\t\texcept:\n\t\t\t\traise\n\t\truns.sort(key=lambda r: r.budget)\n\t\treturn(runs)",
"def list(\n cls,\n tensorboard_experiment_name: str,\n tensorboard_id: Optional[str] = None,\n filter: Optional[str] = None,\n order_by: Optional[str] = None,\n project: Optional[str] = None,\n location: Optional[str] = None,\n credentials: Optional[auth_credentials.Credentials] = None,\n ) -> List[\"TensorboardRun\"]:\n\n parent = utils.full_resource_name(\n resource_name=tensorboard_experiment_name,\n resource_noun=TensorboardExperiment._resource_noun,\n parse_resource_name_method=TensorboardExperiment._parse_resource_name,\n format_resource_name_method=TensorboardExperiment._format_resource_name,\n parent_resource_name_fields={Tensorboard._resource_noun: tensorboard_id},\n project=project,\n location=location,\n )\n\n tensorboard_runs = super()._list(\n filter=filter,\n order_by=order_by,\n project=project,\n location=location,\n credentials=credentials,\n parent=parent,\n )\n\n for tensorboard_run in tensorboard_runs:\n tensorboard_run._sync_time_series_display_name_to_id_mapping()\n\n return tensorboard_runs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stop all TensorBoard instances launched by Sacredboard.
|
def close_tensorboards():
stop_all_tensorboards()
return "Stopping tensorboard"
|
[
"def _tensorboard_kill(self):\n print('Closing current session of tensorboard.')\n if sys.platform == 'win32':\n os.system(\"taskkill /f /im tensorboard.exe\")\n elif sys.platform == 'linux':\n os.system('pkill tensorboard')\n else:\n print('No running instances of tensorboard.')",
"def test_stop_run_tensorboard(self):\n pass",
"def stop(ctx, yes):\n user, project_name = get_project_or_local(ctx.obj.get('project'))\n group = ctx.obj.get('group')\n experiment = ctx.obj.get('experiment')\n\n if experiment:\n obj = 'experiment `{}`'.format(experiment)\n elif group:\n obj = 'group `{}`'.format(group)\n else:\n obj = 'project `{}/{}`'.format(user, project_name)\n\n if not yes and not click.confirm(\"Are sure you want to stop tensorboard \"\n \"for {}\".format(obj)):\n click.echo('Existing without stopping tensorboard.')\n sys.exit(1)\n\n if experiment:\n try:\n PolyaxonClient().experiment.stop_tensorboard(\n username=user,\n project_name=project_name,\n experiment_id=experiment)\n Printer.print_success('Tensorboard is being deleted')\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not stop tensorboard {}.'.format(obj))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n elif group:\n try:\n PolyaxonClient().experiment_group.stop_tensorboard(\n username=user,\n project_name=project_name,\n group_id=group)\n Printer.print_success('Tensorboard is being deleted')\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not stop tensorboard {}.'.format(obj))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n else:\n try:\n PolyaxonClient().project.stop_tensorboard(\n username=user,\n project_name=project_name)\n Printer.print_success('Tensorboard is being deleted')\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not stop tensorboard {}.'.format(obj))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)",
"def stop(self):\n for thread in self.threads:\n thread.stop()\n self.topology = None",
"def stop(self):\n\t\tPopen([\"screen\", \"-S\", self.name, \"-X\", \"quit\"])",
"def stop(self):\n for shell in self._shells.values():\n shell.disconnect()",
"def stopController(self):\n self.thread_shutdown_timer = timeit.default_timer()\n self.running = False",
"def stop(self):\n c = Controller()\n instance_id = c.instance.id\n c.terminate_instance()\n\n print('Successfully shut down instance: ' + instance_id)",
"def stop(self):\n \n try:\n self.clear()\n del self.da\n del self.cl\n del self.la\n del self.lv\n \n except Exception as E:\n if self.debug:\n print(\"4x4x4 LED Cube stop error: \",E)",
"def stop_services(self):",
"def stopAll(self):\n for core in self.core_list:\n try:\n core[2].send('shutdown')\n except AttributeError:\n pass\n \n for core in self.core_list:\n try:\n core[1].join()\n except AttributeError:\n pass",
"def shutdown(self):\n for action in self.actions:\n action.unregister()",
"def stop(self):\n self.tasks.stop()",
"def close(self):\n if hasattr(self, '_tensorboard'):\n self._tensorboard.close()",
"def stop_threads(self):\n for thread in self.threads.values():\n thread.stop()",
"def cleanup_consoles(self):\n for c in self.consoles:\n c.kill()\n self.consoles = []",
"def _do_stop(self):\n self.controller.delete_nodes(self.nodes)\n self.logger.info(self.nodes)\n self.nodes = []\n self.logger.info(\"All nodes deleted\")\n self.state = self.S_STOPPED",
"def stop(self):\n if self.run_matrx_api:\n if self.verbose:\n print(\"Shutting down Matrx api\")\n _ = requests.get(\"http://localhost:\" + str(api._port)\n + \"/shutdown_API\")\n self.api_info[\"api_thread\"].join()\n\n if self.run_matrx_visualizer:\n if self.verbose:\n print(\"Shutting down Matrx visualizer\")\n _ = requests.get(\"http://localhost:\"\n + str(visualization_server.port)\n + \"/shutdown_visualizer\")\n self.matrx_visualizer_thread.join()",
"def iterRun_stop(self):\n self.quad.stop_thread()\n self.ctrl.stop_thread()\n self.pathReady = False\n self.iterRunGo = False\n print(\"controller stopped\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a random first name.
|
def gen_first_name(ucase=2, lcase=2, gender=False):
    gen_name = {}
    # Pick a gender using GENDER_BIAS when the caller does not supply one.
    if not gender:
        if random.randrange(1,100) > GENDER_BIAS:
            gender = 'f'
        else:
            gender = 'm'
    _name = None
    # The seeds index into cumulative-frequency name tables via bisect, so
    # common names are drawn more often than rare ones.
    _male_name_seed = random.randrange(1, 90040)
    _female_name_seed = random.randrange(1500, 90024) #1500? Too many Patricia's
    if gender == 'f':
        _name = female_name_tuples[bisect.bisect(FEMALE_NAME_KEYS, _female_name_seed)][1]
    else:
        _name = male_name_tuples[bisect.bisect(MALE_NAME_KEYS, _male_name_seed)][1]
    # ucase / lcase: percent chance of leaving the stored casing ('u') or
    # swapping it ('l'); otherwise the name is title-cased ('p').
    _random = random.randrange(0, 100)
    if _random < ucase:
        gen_name['given_name'] = _name
        gen_name['case'] = 'u'
    elif _random > 100 - lcase:
        gen_name['given_name'] = _name.swapcase()
        gen_name['case'] = 'l'
    else:
        gen_name['given_name'] = _name.title()
        gen_name['case'] = 'p'
    gen_name['gender'] = gender
    return gen_name
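
# Illustrative sketch (not part of the original module): the lookup above relies
# on module-level cumulative-count tables (FEMALE_NAME_KEYS / female_name_tuples).
# A minimal, self-contained version of the same bisect-over-cumulative-counts
# idea with invented toy data looks like this.
import bisect
import random

toy_name_tuples = [(40, 'MARY'), (70, 'LINDA'), (90, 'SUSAN'), (100, 'KAREN')]
TOY_KEYS = [cum for cum, _ in toy_name_tuples]   # cumulative counts: 40, 70, 90, 100

def pick_weighted_name():
    # A seed of 1-40 lands on MARY, 41-70 on LINDA, and so on, so each name is
    # drawn in proportion to its share of the cumulative total.
    seed = random.randrange(1, TOY_KEYS[-1] + 1)
    return toy_name_tuples[bisect.bisect_left(TOY_KEYS, seed)][1]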
|
[
"def get_random_name():\n first_name = get_rnd('first_name')\n last_name = get_rnd('last_name')\n username = first_name[0:2] + last_name[0:6]\n return (\"%s\" % username.lower(), \"%s %s\" % (first_name, last_name))",
"def create_random_surname(self):\n surname = ''\n for _ in range(self.NAME_LENGTH):\n surname += choice(ascii_letters)\n return surname",
"def generate_surname() -> str:\n\n surnames = data.names.get_surnames()\n max_index = len(surnames) - 1\n index = random.randint(0, max_index)\n\n return surnames[index]",
"def create_random_name(self):\n name = ''\n for _ in range(self.NAME_LENGTH):\n name += choice(ascii_letters)\n return name",
"def generate_given_name(gender: Gender) -> str:\n\n gender_index = 0 if gender == gender.male else 1\n name_index = random.randint(0, len(data.names.get_given_names()) - 1)\n\n return data.names.get_given_names()[name_index][gender_index]",
"def generate_random_username():\n return os.urandom(100).hex()[:RANDOM_USERNAME_LENGTH]",
"def _generate_shortname(cls):\n return ''.join([cls.letters[random.randrange(0, cls.num_letters)] for idx in range(0, cls.SHORTNAME_LEN)])",
"def generate_name(first_letter):\n\n # validate input\n assert(first_letter in string.ascii_lowercase)\n # keep a list of all the generated letters (starting with the given first letter)\n letters = [first_letter]\n\n with torch.no_grad():\n # use the given first letter to start the process\n x = helpers.letter_to_onehot(first_letter)\n # convert to the shape that the LSTM module requires as input\n x = x.view(1, 1, -1)\n # the first hidden input will be zeros since we're starting a new\n # sequence, or new name\n hidden = None\n\n # loop until ternimal character is predicted\n while True:\n y_pred, hidden = model(x, hidden)\n # here we can choose between deterministic or random conversion\n # from prediction logits to letter\n letter = pred_to_letter_rand(y_pred)\n # stop the process when the terminal character is predicted\n if letter == '_':\n break\n # add this predicted letter to the list of letters\n letters.append(letter)\n # convert predicted letter to a onehot so it can be used as\n # input for the next step (with the required shape)\n x = helpers.letter_to_onehot(letter).view(1, 1, -1)\n\n # put all the predicted letters together to get a name\n name = ''.join(letters)\n # validate output type\n assert(type(name) is str)\n # make sure we didn't add in some strange characters by accident\n for letter in name:\n assert(letter in string.ascii_lowercase)\n # names should start with a capital letter\n name = name.capitalize()\n\n return name",
"def rand_name(self, name='', prefix=None):\n randbits = str(random.randint(1, 0x7fffffff))\n rand_name = randbits\n if name:\n rand_name = name + '-' + rand_name\n if prefix:\n rand_name = prefix + '-' + rand_name\n return rand_name",
"def first_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"first_name\")",
"def generate_user_name():\n connection = mysql.get_db()\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM users WHERE username IS NULL\")\n users_obj = convert_objects(cursor.fetchall(), cursor.description)\n cursor.close()\n counter = random.randint(1, 101)\n for user in users_obj:\n # Set username. It will be\n # [first letter of firstname][lastname without spaces/special charcters][a number to differentiate]\n user_name = \"\"\n if 'first_name' in user and user['first_name'] is not None:\n user_name += user[\"first_name\"][:1]\n if 'last_name' in user and user['last_name'] is not None:\n # https://stackoverflow.com/questions/5843518/remove-all-special-characters-punctuation-and-spaces-from-string\n user_name += ''.join(e for e in user[\"last_name\"] if e.isalnum())\n user_name += str(counter)\n counter += 1\n put_cursor = connection.cursor()\n put_cursor.execute(\"UPDATE users SET username=%s WHERE id=%s\", (user_name, user['id']))\n connection.commit()\n return make_response(\"OK\", HTTPStatus.OK)",
"def get_first_name(self):\r\n return self._first_name",
"def gen_personal_email(first_name, last_name):\n domains = ['gmail.com', 'yahoo.com', 'hotmail.com',\n 'icloud.com', 'aol.com', 'outlook.com']\n\n domain_seed = random.randrange(0, len(domains))\n\n first_seed = random.randrange(0, 2)\n account = ''\n if first_seed == 0:\n account = '{0}.{1}@'.format(first_name, last_name)\n elif first_seed == 1:\n account = '{0}{1}@'.format(first_name[:1], last_name)\n else:\n account = '{0}{1}@'.format(first_name, last_name[:1])\n\n return account + domains[domain_seed]",
"def generate_uuid(self, user_name):\n\t\timport random\n\t\tuuid = \"\"\n\t\tfirst_name, last_name = user_name.split()\n\t\tuuid += first_name[0]",
"def gen_name(length):\n seed()\n return ''.join(choice(ascii_lowercase) for _ in xrange(length))",
"def generate_name(race, gender):\n\n # raise BaseException(os.getcwd())\n if os.getcwd() == '/app':\n base_path = '.'\n else:\n base_path = '..'\n\n if gender == globals.NONBINARY:\n with open(os.path.join(base_path, 'core', 'data', 'races', race,\n 'first_male.txt')) as f: first_names = f.readlines()\n with open(os.path.join(base_path, 'core', 'data', 'races', race,\n 'first_female.txt')) as f: first_names.extend(f.readlines())\n\n else:\n with open(os.path.join(base_path, 'core', 'data', 'races', race, 'first_{}.txt'.format(gender))) as f: first_names = f.readlines()\n\n with open(os.path.join(base_path, 'core', 'data', 'races', race, 'last.txt')) as f: last_names = f.readlines()\n\n return {\n 'first': first_names[random.randint(0, len(first_names)-1)].rstrip(),\n 'last': last_names[random.randint(0, len(last_names)-1)].rstrip()\n }",
"def generate_name(max_chars: int):\n return \"\".join([\n random.choice(string.ascii_letters + string.digits)\n for n in range(max_chars)\n ])",
"def firstName(self):\n others = self._otherNames.split()\n return others[0]",
"def generate_username():\n n = random.randint(1, 999999)\n new_username = 'user%d' % (n,)\n\n while User.objects.filter(username=new_username).exists():\n n = random.randint(1, 999999)\n new_username = 'user%d' % (n,)\n\n return new_username"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Internal function to randomly generate a gender.
|
def gen_random_gender(bias=GENDER_BIAS):
_random = random.randrange(0, 99)
if _random <= bias:
return 'f'
else:
return 'm'
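
# Quick sanity check (illustrative only; assumes this module's GENDER_BIAS and
# random import): the observed share of 'f' should sit near the bias. Note that
# randrange(0, 99) draws from 0-98, so 'f' comes back with probability
# (bias + 1) / 99, a hair above the nominal percentage.
from collections import Counter
counts = Counter(gen_random_gender() for _ in range(100000))
print(counts['f'] / 100000.0)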
|
[
"def gender(self, auto=True):\n if auto:\n if (self.first_name.endswith('a')\n or self.first_name.endswith('ine')\n or self.first_name.endswith('ie')):\n return 'Female'\n return 'Male'\n return random.choice(['Male', 'Female'])",
"def generate_given_name(gender: Gender) -> str:\n\n gender_index = 0 if gender == gender.male else 1\n name_index = random.randint(0, len(data.names.get_given_names()) - 1)\n\n return data.names.get_given_names()[name_index][gender_index]",
"def random_camper(gender=None):\n return random.choice(get_campers(gender))",
"def __DefineGender(self):\n if self.__sex_genes[0] == self.__sex_genes[1]:\n self.__gender = FEMALE\n else:\n self.__gender = MALE",
"def get_gender(gender_number: int):\r\n if gender_number == 1 or gender_number == 3 or gender_number == 5:\r\n return \"male\"\r\n elif gender_number == 2 or gender_number == 4 or gender_number == 6:\r\n return \"female\"",
"def rnd_female_name(iNo_of_names):\n\n# DATABASE FOR THE MALE NAMES\n import modules.x_database as db\n\n # Connect to the database\n ccTremb = db.connect()\n cChosen_db = db.rnd_woman(ccTremb)\n aaNames = pick_name_w_alt(iNo_of_names, cChosen_db)\n return aaNames",
"def generate_gender_digits(gender: Gender) -> str:\n if gender == Gender.FEMALE:\n number = randrange(GENDER_FEMALE_MIN, GENDER_FEMALE_MAX + 1)\n else:\n number = randrange(GENDER_MALE_MIN, GENDER_MALE_MAX + 1)\n return f\"{number:03d}\"",
"def choose_sex(self):\n x = random.random()\n cumul_prob = 0.0\n for s, p in zip(self.sexes, self.sex_prob):\n cumul_prob += p\n if x < cumul_prob:\n return s\n raise ValueError(\"Unable to choose sex, \"\n \"check probabilities sum to 1.0\")",
"def random_camper_sample(count, gender=None):\n return random.sample(get_campers(gender), 5)",
"def _get_gender(self):\n female = ['female', 'actress', 'women']\n male = ['male', 'actor', 'men']\n full_text = self.soup.get_text().lower()\n count_female = full_text.count(' she ') + full_text.count(' her ')\n count_male = full_text.count(' he ') + full_text.count(' his ')\n\n try:\n #Grabs the text in catlinks id\n catlinks = self.soup.find(id='catlinks').text.lower()\n if any(s in catlinks for s in female):\n self.gender = 'F'\n elif any(s in catlinks for s in male):\n self.gender = 'M'\n else:\n try:\n ratio_male = float(count_male) / float(count_female)\n except:\n ratio_male = 1\n if ratio_male > 2:\n self.gender = 'M'\n elif ratio_male < 0.5:\n self.gender = 'F'\n else:\n self.gender = None\n except:\n self.gender = None",
"def gen_first_name(ucase=2, lcase=2, gender=False):\n gen_name = {}\n \n if not gender:\n if random.randrange(1,100) > GENDER_BIAS:\n gender = 'f'\n else:\n gender = 'm'\n\n _name = None\n _male_name_seed = random.randrange(1, 90040)\n _female_name_seed = random.randrange(1500, 90024) #1500? Too many Patricia's\n \n if gender == 'f':\n _name = female_name_tuples[bisect.bisect(FEMALE_NAME_KEYS, _female_name_seed)][1]\n else:\n _name = male_name_tuples[bisect.bisect(MALE_NAME_KEYS, _male_name_seed)][1]\n\n\n _random = random.randrange(0, 100)\n if _random < ucase:\n gen_name['given_name'] = _name\n gen_name['case'] = 'u'\n elif _random > 100 - lcase:\n gen_name['given_name'] = _name.swapcase()\n gen_name['case'] = 'l'\n else:\n gen_name['given_name'] = _name.title()\n gen_name['case'] = 'p'\n gen_name['gender'] = gender\n\n return gen_name",
"def rnd_male_name(iNo_of_names):\n\n# DATABASE FOR THE MALE NAMES\n import modules.x_database as db\n\n # Connect to the database\n ccTremb = db.connect()\n cChosen_db = db.rnd_man(ccTremb)\n aaNames = pick_name_w_alt(iNo_of_names, cChosen_db)\n return aaNames",
"def check_gender(self, gender):\n \n if gender.lower() == 'male' or gender.lower() == 'female':\n return gender.lower()\n else:\n print('Please enter correct gender type: male or female')\n exit()",
"def gender_full(self) -> str:\n gender_map = {\"m\": \"man\", \"f\": \"woman\", \"x\": \"nonbinary\"}\n return gender_map.get(self._gender, \"unknown\")",
"def GetSexGene(self):\n return random.choice(self.__sex_genes)",
"def _get_state(self):\n gender = np.random.choice([0, 1])\n age = np.random.choice(range(18, 65))\n norm_age = (age - 18) / (65 - 18)\n return np.array([0, gender, norm_age])",
"def db_gender(self, value):\n known = {u'male': u'Q6581097',\n u'female': u'Q6581072',\n u'unknown': u'somevalue'} # a special case\n if value not in known.keys():\n pywikibot.output(u'invalid gender entry: %s' % value)\n return\n\n if known[value] in (u'somevalue', u'novalue'):\n return WD.Statement(\n known[value],\n special=True)\n else:\n return WD.Statement(\n self.wd.QtoItemPage(known[value]))",
"def generate_name(race, gender):\n\n # raise BaseException(os.getcwd())\n if os.getcwd() == '/app':\n base_path = '.'\n else:\n base_path = '..'\n\n if gender == globals.NONBINARY:\n with open(os.path.join(base_path, 'core', 'data', 'races', race,\n 'first_male.txt')) as f: first_names = f.readlines()\n with open(os.path.join(base_path, 'core', 'data', 'races', race,\n 'first_female.txt')) as f: first_names.extend(f.readlines())\n\n else:\n with open(os.path.join(base_path, 'core', 'data', 'races', race, 'first_{}.txt'.format(gender))) as f: first_names = f.readlines()\n\n with open(os.path.join(base_path, 'core', 'data', 'races', race, 'last.txt')) as f: last_names = f.readlines()\n\n return {\n 'first': first_names[random.randint(0, len(first_names)-1)].rstrip(),\n 'last': last_names[random.randint(0, len(last_names)-1)].rstrip()\n }",
"def generateBlood():\n blood_type = choice(bloodTypes)\n Rh = '+' if randint(0, 1) else '-'\n return f\"'{blood_type}{Rh}'\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a random email address based on the first and last names.
|
def gen_personal_email(first_name, last_name):
    domains = ['gmail.com', 'yahoo.com', 'hotmail.com',
               'icloud.com', 'aol.com', 'outlook.com']
    domain_seed = random.randrange(0, len(domains))
    # Three account formats; the upper bound must be 3 so the
    # first-name + last-initial branch is actually reachable.
    first_seed = random.randrange(0, 3)
    account = ''
    if first_seed == 0:
        account = '{0}.{1}@'.format(first_name, last_name)
    elif first_seed == 1:
        account = '{0}{1}@'.format(first_name[:1], last_name)
    else:
        account = '{0}{1}@'.format(first_name, last_name[:1])
    return account + domains[domain_seed]
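
# Equivalent variant (illustrative, not from the original source): selecting the
# account pattern with random.choice keeps the formats in one place and makes
# every template reachable by construction.
import random

def gen_personal_email_v2(first_name, last_name):
    domains = ['gmail.com', 'yahoo.com', 'hotmail.com',
               'icloud.com', 'aol.com', 'outlook.com']
    templates = ['{f}.{l}@{d}',   # jane.doe@gmail.com
                 '{fi}{l}@{d}',   # jdoe@gmail.com
                 '{f}{li}@{d}']   # janed@gmail.com
    return random.choice(templates).format(f=first_name, l=last_name,
                                           fi=first_name[:1], li=last_name[:1],
                                           d=random.choice(domains))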
|
[
"def create_random_email(self):\n email = ''\n for _ in range(self.NAME_LENGTH):\n email += choice(ascii_letters)\n email += '@example.local'\n return email",
"def generate_email(self, user_name):\n\t\tfirst_name, last_name = user_name.split()\n\t\treturn first_name.lower() +last_name.lower() + \"@dummy.com\"",
"def gen_business_email(first, last, company_name):\n company_fixed = company_name.replace('&', ' ').replace(' ', ' ')\n company_parts = company_fixed.split()\n if len(company_parts) > 3:\n company_domain = company_parts[0] + company_parts[1] #+ y[2]\n elif len(company_parts) > 1:\n company_domain = \"\".join([part for part in company_parts if part != company_parts[len(company_parts) - 1]])\n else:\n company_domain = company_parts[0]\n company_domain_fixed = company_domain.replace(\", Inc.\", \"\").replace(\"Inc.\", \"\").replace(\"Inc\", \"\").replace('/', '').replace('|', '').replace(',', '').replace(';', '').replace(\"'\", \"\").strip()\n if len(company_domain_fixed) > 30:\n company_domain_fixed = company_domain_fixed[:20]\n email = gen_personal_email(first, last)\n email = email.split('@')[0] + '@' + company_domain_fixed + '.com'\n email = email.replace('..', '.')\n return email",
"def get_random_name():\n first_name = get_rnd('first_name')\n last_name = get_rnd('last_name')\n username = first_name[0:2] + last_name[0:6]\n return (\"%s\" % username.lower(), \"%s %s\" % (first_name, last_name))",
"def create_people(self):\n emails = [\n \"user1@ggrc.com\",\n \"miha@policy.com\",\n \"someone.else@ggrc.com\",\n \"another@user.com\",\n ]\n for email in emails:\n self.generator.generate_person({\n \"name\": email.split(\"@\")[0].title(),\n \"email\": email,\n }, \"Administrator\")",
"def random_phone_generator():\n first = str(randint(100, 999))\n second = str(randint(1, 888)).zfill(3)\n last = (str(randint(1, 9998)).zfill(4))\n while last in ['1111', '2222', '3333', '4444', '5555', '6666', '7777', '8888']:\n last = (str(randint(1, 9998)).zfill(4))\n \n return '{}-{}-{}'.format(first, second, last)",
"def create_random_surname(self):\n surname = ''\n for _ in range(self.NAME_LENGTH):\n surname += choice(ascii_letters)\n return surname",
"def any_email():\n return \"%s@%s.%s\" % (any_string(max_length=10),\n any_string(max_length=10),\n any_string(min_length=2, max_length=3))",
"def generate_surname() -> str:\n\n surnames = data.names.get_surnames()\n max_index = len(surnames) - 1\n index = random.randint(0, max_index)\n\n return surnames[index]",
"def get_name_email(self):\n field_entries = self.fields.all()\n first_name = \"\"\n last_name = \"\"\n name = \"\"\n email = \"\"\n for entry in field_entries:\n field = entry.field\n if field.field_type.lower() == 'emailfield':\n email = entry.value\n if field.field_type.lower() == 'emailverificationfield':\n email = entry.value\n if field.label.lower() in ['name']:\n name = entry.value\n if field.label.lower() in ['first name']:\n first_name = entry.value\n if field.label.lower() in ['last name']:\n last_name = entry.value\n if not name:\n if first_name or last_name:\n name = '%s %s' % (first_name, last_name)\n if not name:\n # pick the name from email\n if email:\n if '@' in email:\n name, domain = email.split('@')\n else:\n name = email\n\n return (name, email)",
"def get_random_email_domain():\n random = randint(0, 100)\n for domain in EMAIL_DOMAINS:\n if random <= domain[0]:\n return domain[1]\n return None",
"def email(self):\n return '{}.{}@email.com'.format(self.first, self.last)",
"def gen_phone_number():\n area_code = random.randrange(100, 799)\n phone_1 = random.randrange(100, 999)\n phone_2 = random.randrange(1000, 9999)\n return str(area_code) + str(phone_1) + str(phone_2)",
"def generate_attendee_id(self):\n n = random.randint(1, 12)\n identifier = \"\".join(random.choice(string.ascii_letters) for i in range(n))\n return identifier",
"def generate_uuid(self, user_name):\n\t\timport random\n\t\tuuid = \"\"\n\t\tfirst_name, last_name = user_name.split()\n\t\tuuid += first_name[0]",
"def generate_username(\n email: typing.Optional[str],\n given_name: typing.Optional[str],\n family_name: typing.Optional[str],\n) -> str:\n\n def check_name(name):\n username = slugify(name)\n if name and User.objects.filter(username=username).count() == 0:\n return username\n else:\n return None\n\n email_name = check_name(email.split(\"@\")[0]) if email else None\n if email_name:\n return email_name\n\n given_slug = check_name(given_name) if given_name else None\n if given_slug:\n return given_slug\n\n name_slug = (\n check_name(given_name + \" \" + family_name)\n if given_name and family_name\n else None\n )\n if name_slug:\n return name_slug\n\n email_slug = check_name(email) if email else None\n if email_slug:\n return email_slug\n\n base_name = email_name if email_name else \"user\"\n existing = User.objects.filter(username__startswith=base_name + \"-\").count()\n return \"{}-{}\".format(base_name, existing + 1)",
"def gen_first_name(ucase=2, lcase=2, gender=False):\n gen_name = {}\n \n if not gender:\n if random.randrange(1,100) > GENDER_BIAS:\n gender = 'f'\n else:\n gender = 'm'\n\n _name = None\n _male_name_seed = random.randrange(1, 90040)\n _female_name_seed = random.randrange(1500, 90024) #1500? Too many Patricia's\n \n if gender == 'f':\n _name = female_name_tuples[bisect.bisect(FEMALE_NAME_KEYS, _female_name_seed)][1]\n else:\n _name = male_name_tuples[bisect.bisect(MALE_NAME_KEYS, _male_name_seed)][1]\n\n\n _random = random.randrange(0, 100)\n if _random < ucase:\n gen_name['given_name'] = _name\n gen_name['case'] = 'u'\n elif _random > 100 - lcase:\n gen_name['given_name'] = _name.swapcase()\n gen_name['case'] = 'l'\n else:\n gen_name['given_name'] = _name.title()\n gen_name['case'] = 'p'\n gen_name['gender'] = gender\n\n return gen_name",
"def enter_address_in_email_field(self):\n email = self.random_email()\n email_filed = self.wait_for_an_element_to_be_present('#email_create')\n email_filed.send_keys(email)\n return email",
"def generateSMSEmail(profile):\r\n if profile['carrier'] is None or not profile['phone_number']:\r\n return None\r\n\r\n return str(profile['phone_number']) + \"@\" + profile['carrier']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a random address for the specified state.
|
def gen_address(state=None):
    if state:
        if state in STATE_ADDRESS_LIST:
            address_list_length = len(STATE_ADDRESS_LIST[state])
            # randrange already excludes its upper bound, so pass the full
            # length or the last address for the state is never chosen.
            state_seed = random.randrange(0, address_list_length)
            return STATE_ADDRESS_LIST[state][state_seed]
        else:
            raise ValueError('Unknown State Code')
    else:
        _random_state = random.randrange(0, len(STATE_CODE_LIST))
        state = STATE_CODE_LIST[_random_state]
        random_seed = random.randrange(0, len(STATE_ADDRESS_LIST[state]))
        return STATE_ADDRESS_LIST[state][random_seed]
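
# Minimal self-contained sketch of the same lookup (toy data; the real
# STATE_ADDRESS_LIST / STATE_CODE_LIST are built elsewhere in the module).
import random

TOY_ADDRESSES = {
    'WA': ['123 Pine St, Seattle, WA 98101', '9 Rain Ave, Tacoma, WA 98402'],
    'TX': ['77 Oak Blvd, Austin, TX 73301'],
}

def pick_address(state=None):
    if state is None:
        state = random.choice(list(TOY_ADDRESSES))
    if state not in TOY_ADDRESSES:
        raise ValueError('Unknown State Code')
    return random.choice(TOY_ADDRESSES[state])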
|
[
"def random_state(self, state):\n pass",
"def generate_addresses(self, session):\n # fill a few foreign key dependencies\n session.add(context.State(\n short_name='WA',\n long_name='Washington'\n ))\n\n addresses = [\n context.Address(\n latitude=self.random_latitude(),\n longitude=self.random_longitude(),\n address='that street you know somewhere',\n city='a city %s' % i,\n county='King',\n state='WA',\n zip='a zip'\n )\n for i in range(5)\n ]\n session.add_all(addresses)\n session.commit()",
"def __generate_state() -> str:\n return ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(16))",
"def random_address(locale=None):\n # XXX Exclude 'ar_PS' that doesn't work currently (it's defined in Faker\n # but not in pycountry).\n # See: https://github.com/scaleway/postal-address/issues/20\n while locale in [None, 'ar_PS']:\n locale = random.choice(list(faker.config.AVAILABLE_LOCALES))\n fake = faker.Faker(locale=locale)\n\n components = {\n 'line1': fake.street_address(),\n 'line2': fake.sentence(),\n 'postal_code': fake.postcode(),\n 'city_name': fake.city(),\n 'country_code': fake.country_code()}\n\n subdiv_codes = list(territory_children_codes(components['country_code']))\n if subdiv_codes:\n components['subdivision_code'] = random.choice(subdiv_codes)\n\n return Address(strict=False, **components)",
"def get_rand_ip():\n return socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))",
"def generate_state():\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in range(32))\r\n login_session['state'] = state",
"def random_number():\n return random.getrandbits(32)",
"def createnewaddress(self):\n return self.wallet.create_new_address(False).to_ui_string()",
"def gen_employer(state):\n # print(config.pickle_path)\n pkf = open(config.pickle_path +'employers.pkl', \"rb\")\n\n employers = pickle.load(pkf)\n pkf.close()\n\n sublist = [employer for employer in employers if employer['state'] == state]\n seed = random.randrange(0, len(sublist))\n return sublist[seed]",
"def random_longitude():\n return np.round(np.random.uniform(-124, -117, 1), 3)[0]",
"def create_address(self, address: Address):\n pass",
"def generateStateOCDID(state):\n ocdid = TURBOVOTE_BASEOCDID\n ocdid += TURBOVOTE_STATEOCDID\n ocdid += state.lower()\n\n return ocdid",
"def rand():\r\n global rand_seed\r\n rand_seed = (MULTIPLIER * rand_seed + INCREMENT)\r\n return (rand_seed >> 16) & 0x7FFF",
"def urandom(size: int) -> str:\n ...",
"def random_state(self, s, n=20):\n for i in range(n):\n a = random.choice(self.actions(s))\n s = self.result(s,a)\n return s",
"def random_location(self):\n location_key = random.choice(list(self.data[\"capitalof\"]))\n if bool(random.getrandbits(1)):\n location = location_key\n else:\n location = self.data[\"capitalof\"][location_key]\n return location",
"def _generate_address():\n try:\n return str(bson.ObjectId()) # bson is faster\n except NameError:\n return str(uuid.uuid1())[:-18] # remove mac-addr",
"def generate_state_key(self, state, role):\n\n pass",
"def get_value(self, state):\n if str(state) in self.value_dict.keys():\n return self.value_dict[str(state)]\n else:\n return random.randint(0, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a random employer for the specified state.
|
def gen_employer(state):
    # Employer records are pre-pickled; each record carries a 'state' field.
    with open(config.pickle_path + 'employers.pkl', "rb") as pkf:
        employers = pickle.load(pkf)
    sublist = [employer for employer in employers if employer['state'] == state]
    seed = random.randrange(0, len(sublist))
    return sublist[seed]
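
# Defensive variant (illustrative; the pickle path and record layout are
# assumptions taken from the function above): guard against states with no
# employers instead of letting randrange(0, 0) raise.
import pickle
import random

def gen_employer_safe(state, pickle_file='employers.pkl'):
    with open(pickle_file, 'rb') as pkf:
        employers = pickle.load(pkf)
    sublist = [e for e in employers if e['state'] == state]
    if not sublist:
        raise ValueError('no employers on file for state {}'.format(state))
    return random.choice(sublist)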
|
[
"def gen_address(state=None):\n if state:\n if state in STATE_ADDRESS_LIST:\n address_list_length = len(STATE_ADDRESS_LIST[state])\n state_seed = random.randrange(0, address_list_length - 1)\n # print(state_addresses[state])\n return STATE_ADDRESS_LIST[state][state_seed]\n else:\n raise ValueError('Unknown State Code')\n else:\n _random_state = random.randrange(0, len(STATE_CODE_LIST))\n state = STATE_CODE_LIST[_random_state]\n\n random_seed = random.randrange(0, len(STATE_ADDRESS_LIST[state]))\n return STATE_ADDRESS_LIST[state][random_seed]",
"def get_random_experiment(self) -> Experiment:\n\n experiment = get_experiment()\n experiment_name = \"\".join(random.choice(string.ascii_letters) for i in range(8))\n experiment.name = experiment_name\n return experiment",
"def random_state(self, state):\n pass",
"def select_random_county(county_state_list):\n county_state = random.choice(county_state_list)\n county = county_state[0]\n state = county_state[1]\n\n return county, state",
"def randomEyes(self, duration):\n if not self.proxy:\n self.proxy = self.session.service(\"ALLeds\")\n return self.proxy.randomEyes(duration)",
"def get_rng(random_state):\n if random_state is None:\n return np.random.mtrand._rand\n elif isinstance(random_state, (numbers.Integral, np.integer)):\n return np.random.RandomState(random_state)\n if isinstance(random_state, np.random.RandomState):\n return random_state\n raise ValueError('Wrong random state. Expecting None, an int or a numpy '\n 'RandomState instance, got a '\n '{}'.format(type(random_state)))",
"def get_random_student(self):\n if self.num_students == 0:\n return None\n return random.choice(self._students)",
"def random_teacher(av_teachers, g, *grade):\n random.seed()\n if grade:\n t = random.choice(av_teachers[grade_level])\n else:\n grade_level = random.choice(g)\n t = random.choice(av_teachers[grade_level])\n return t",
"def choose_office_random(dojo):\n number_of_offices = len(dojo.office)\n if number_of_offices > 0:\n index = random.randrange(number_of_offices)\n else:\n return \"NoRoomException\"\n list_offices = list(dojo.office)\n return list_offices[index].name",
"def get_random_user():\n count = User.objects.all().count()\n if not count:\n raise User.DoesNotExist\n random = randint(0, count - 1)\n return User.objects.all()[random]",
"def first_participant_choice():\n return random.randint(0, 2)",
"def get_random(self):\n index = randrange(self.size)\n return self.individuals[index]",
"def generate_person(self, cls=Person):\n # choose a sex\n sex = self.choose_sex()\n # choose a name\n name = self.choose_name(sex)\n # sample age\n age = int(cls.age_distrib_func(*cls.age_distrib_args))\n # sample height\n height = cls.height_distrib_func(*cls.height_distrib_args)\n \n return Person(name, sex, age, height)",
"def random_camper(gender=None):\n return random.choice(get_campers(gender))",
"def _get_state(self):\n gender = np.random.choice([0, 1])\n age = np.random.choice(range(18, 65))\n norm_age = (age - 18) / (65 - 18)\n return np.array([0, gender, norm_age])",
"def generate_given_name(gender: Gender) -> str:\n\n gender_index = 0 if gender == gender.male else 1\n name_index = random.randint(0, len(data.names.get_given_names()) - 1)\n\n return data.names.get_given_names()[name_index][gender_index]",
"def GetSexGene(self):\n return random.choice(self.__sex_genes)",
"def gen_random_gender(bias=GENDER_BIAS):\n _random = random.randrange(0, 99)\n if _random <= bias:\n return 'f'\n else:\n return 'm'",
"def getName(state):\n assert state is not None, \"getName called with no state\"\n return state[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a business email address based on the person's name and employer.
|
def gen_business_email(first, last, company_name):
company_fixed = company_name.replace('&', ' ').replace(' ', ' ')
    company_parts = company_fixed.split()
    # Domain stem: first two words for long names, everything but the last
    # word for short multi-word names, or the single word itself.
    if len(company_parts) > 3:
        company_domain = company_parts[0] + company_parts[1]
    elif len(company_parts) > 1:
        company_domain = "".join([part for part in company_parts if part != company_parts[len(company_parts) - 1]])
    else:
        company_domain = company_parts[0]
    # Strip corporate suffixes and punctuation, and cap very long stems.
    company_domain_fixed = company_domain.replace(", Inc.", "").replace("Inc.", "").replace("Inc", "").replace('/', '').replace('|', '').replace(',', '').replace(';', '').replace("'", "").strip()
    if len(company_domain_fixed) > 30:
        company_domain_fixed = company_domain_fixed[:20]
    # Reuse the personal-email account portion with the company domain.
    email = gen_personal_email(first, last)
    email = email.split('@')[0] + '@' + company_domain_fixed + '.com'
    email = email.replace('..', '.')
    return email
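
# Alternative domain clean-up (illustrative sketch, not a drop-in replacement):
# a single regex pass instead of chained str.replace calls; unusual corporate
# suffixes may behave slightly differently.
import re

def company_domain(company_name, max_len=20):
    cleaned = re.sub(r'\bInc\b\.?', '', company_name)     # drop "Inc" / "Inc."
    parts = cleaned.replace('&', ' ').split()
    if not parts:
        return 'example'
    if len(parts) > 3:
        stem = parts[0] + parts[1]
    elif len(parts) > 1:
        stem = ''.join(parts[:-1])
    else:
        stem = parts[0]
    return re.sub(r'[^A-Za-z0-9]', '', stem)[:max_len].lower()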
|
[
"def generate_email(self, user_name):\n\t\tfirst_name, last_name = user_name.split()\n\t\treturn first_name.lower() +last_name.lower() + \"@dummy.com\"",
"def gen_personal_email(first_name, last_name):\n domains = ['gmail.com', 'yahoo.com', 'hotmail.com',\n 'icloud.com', 'aol.com', 'outlook.com']\n\n domain_seed = random.randrange(0, len(domains))\n\n first_seed = random.randrange(0, 2)\n account = ''\n if first_seed == 0:\n account = '{0}.{1}@'.format(first_name, last_name)\n elif first_seed == 1:\n account = '{0}{1}@'.format(first_name[:1], last_name)\n else:\n account = '{0}{1}@'.format(first_name, last_name[:1])\n\n return account + domains[domain_seed]",
"def create_people(self):\n emails = [\n \"user1@ggrc.com\",\n \"miha@policy.com\",\n \"someone.else@ggrc.com\",\n \"another@user.com\",\n ]\n for email in emails:\n self.generator.generate_person({\n \"name\": email.split(\"@\")[0].title(),\n \"email\": email,\n }, \"Administrator\")",
"def create_random_email(self):\n email = ''\n for _ in range(self.NAME_LENGTH):\n email += choice(ascii_letters)\n email += '@example.local'\n return email",
"def create_email(name, donation):\n letter = ('Dear {}:\\nThank you for donating ${}.\\n'\n 'We appreciate your contribution!\\n\\n\\n\\n'\n 'Sincerely,\\n'\n 'DonationCentral Inc.'.format(name, donation))\n return letter",
"def get_name_email(self):\n field_entries = self.fields.all()\n first_name = \"\"\n last_name = \"\"\n name = \"\"\n email = \"\"\n for entry in field_entries:\n field = entry.field\n if field.field_type.lower() == 'emailfield':\n email = entry.value\n if field.field_type.lower() == 'emailverificationfield':\n email = entry.value\n if field.label.lower() in ['name']:\n name = entry.value\n if field.label.lower() in ['first name']:\n first_name = entry.value\n if field.label.lower() in ['last name']:\n last_name = entry.value\n if not name:\n if first_name or last_name:\n name = '%s %s' % (first_name, last_name)\n if not name:\n # pick the name from email\n if email:\n if '@' in email:\n name, domain = email.split('@')\n else:\n name = email\n\n return (name, email)",
"def getApplicationEmail(name):\n app_id = getApplicationId()\n assert app_id\n\n return \"%s@%s.appspotmail.com\" % (name, app_id)",
"def email(self):\n return '{}.{}@email.com'.format(self.first, self.last)",
"def get_email_address(request):\n\n dd_statuses = (const.STATUS_DD_U, const.STATUS_DD_NU)\n if request.person.status in dd_statuses:\n email_address = request.person.uid + \"@debian.org\"\n else:\n email_address = request.person.email\n return email_address",
"def email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email_address\")",
"def sib_email_id(context):\n target_organization_types = [\n \"commune\",\n \"epci\",\n \"department\",\n \"public_org\",\n \"region\",\n ]\n\n result = \"\"\n\n user = context[\"user\"]\n if user.is_authenticated and user.beneficiary_organization is not None:\n organization_types = user.beneficiary_organization.organization_type\n\n # Check overlap between the two lists\n if not set(organization_types).isdisjoint(target_organization_types):\n result = user.email\n\n return result",
"def Email(i: dict) -> str:\n if 'email' in i.keys():\n out = 'Email: <a href=\"mailto:%s\">%s</a>' % (i['email'], i['email'])\n else:\n out = \"Email: Not Available\\n\\n\"\n return \"<p>\" + out + \"</p>\"",
"def generate_email(self):\n template_path = config['Templates']['folder'] + '/'\n if self.mentor is None:\n template_path += config['Templates']['alone mentees']\n with open(template_path) as tmpl:\n email = tmpl.read().format(recipient=self)\n else:\n template_path += config['Templates']['mentees']\n with open(template_path) as tmpl:\n email = tmpl.read().format(recipient=self, mentor=self.mentor)\n return email",
"def send_invitation_email(self, employee):\n token = account_activation_token.make_token(employee.user)\n\n html_message = render_to_string('employees/employee_invitation_email.html', {\n 'employee': employee,\n 'invite_url': self.data['invite_url'],\n 'uid': urlsafe_base64_encode(force_bytes(employee.user.pk)).decode(\"utf-8\"),\n 'token': token,\n 'organization': employee.organization,\n 'site_name': self.data['invite_url'],\n 'expiration_days': settings.PASSWORD_RESET_TIMEOUT_DAYS\n })\n\n subject = 'Welcome ' + employee.first_name + ' ' + employee.last_name\n\n send_mail(subject,\n '',\n settings.DEFAULT_FROM_EMAIL,\n [employee.user.email],\n html_message = html_message,\n fail_silently=False\n )",
"def mail_address(self):\n return self.project_name + self.base_mail_address",
"def CreateShippingInvoiceAddress(self, ebay_order):\n\t\tvals = {}\n\t\tEbayCustomerInfo = ebay_order[\n\t\t\t'TransactionArray']['Transaction'][0]['Buyer']\n\t\tebay_cust_shipping_adrs = ebay_order['ShippingAddress']\n\t\tvals['invoice_partner_id'] = ebay_order['BuyerUserID']\n\t\tname = ''\n\t\tif EbayCustomerInfo.get('UserFirstName'):\n\t\t\tname = EbayCustomerInfo.get('UserFirstName')\n\t\tif name == '':\n\t\t\tif ebay_cust_shipping_adrs.get('Name'):\n\t\t\t\tname = ebay_cust_shipping_adrs.get('Name')\n\t\t\telse:\n\t\t\t\tname = 'No Name'\n\t\t# vals['last_name'] = EbayCustomerInfo.get('UserLastName')\n\t\tvals['customer_name'] = name\n\t\tvals['invoice_name'] = name\n\t\tif EbayCustomerInfo.get('Email') and EbayCustomerInfo.get('Email')!= 'Invalid Request':\n\t\t\tvals['customer_email'] = EbayCustomerInfo.get('Email')\n\t\t\tvals['invoice_email'] = EbayCustomerInfo.get('Email')\n\t\telse:\n\t\t\tvals['customer_email'] = 'No Email'\n\t\t\tvals['invoice_email'] = 'No Email'\n\t\tvals['invoice_street'] = ebay_cust_shipping_adrs.get('Street1')\n\t\tvals['invoice_street2'] = ebay_cust_shipping_adrs.get('Street2')\n\t\tif ebay_cust_shipping_adrs.get('Phone') and ebay_cust_shipping_adrs.get('Phone') != 'Invalid Request':\n\t\t\tvals['invoice_phone'] = ebay_cust_shipping_adrs.get('Phone')\n\t\tvals['invoice_city'] = ebay_cust_shipping_adrs.get('CityName')\n\t\tvals['invoice_zip'] = ebay_cust_shipping_adrs.get('PostalCode')\n\t\tvals['invoice_state_id'] = ebay_cust_shipping_adrs.get('StateOrProvince')\n\t\tvals['invoice_country_id'] = ebay_cust_shipping_adrs.get('Country')\n\t\treturn vals",
"def any_email():\n return \"%s@%s.%s\" % (any_string(max_length=10),\n any_string(max_length=10),\n any_string(min_length=2, max_length=3))",
"def build_email():\n sender = get_config_value(EMAIL_SENDER, DEFAULT_EMAIL_SENDER)\n recipient = get_config_value(EMAIL_RECIPIENT, DEFAULT_EMAIL_RECIPIENT)\n email = {'to': recipient, 'from': sender}\n return email",
"def create_email(self):\n if self.curr_temp < self.avg_temp - 5:\n self.create_personalised_email(\"COLD\")\n elif self.curr_temp > self.avg_temp + 5:\n self.create_personalised_email(\"WARM\")\n else:\n self.create_personalised_email(\"NORMAL\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a set of dates. Passing birth_year ensures dates are chronological.
|
def gen_dates(birth_year=None):
    birthdate = None
    if birth_year:
        byear = random.randrange(birth_year - 5, birth_year + 5)
    else:
        byear = random.randrange(1944, 1992)
    # Month 1-12 and day 1-28 (28 keeps February valid); randrange excludes
    # its upper bound, hence 13 and 29.
    birthdate = datetime.date(byear, random.randrange(1, 13), random.randrange(1, 29))
    # Wedding 18-34 years after birth, clamped to 2012.
    wyear = random.randrange(byear + 18, byear + 35)
    if wyear > 2012:
        wyear = 2012
    wedding = datetime.date(wyear, random.randrange(1, 13), random.randrange(1, 29))
    results = {'birth' : birthdate, 'wedding' : wedding}
    return results
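
# Illustrative alternative (not from the original source): sampling an offset in
# whole days avoids per-field range bookkeeping and can produce any calendar
# date, including the 29th-31st.
import datetime
import random

def random_date(year_lo, year_hi):
    start = datetime.date(year_lo, 1, 1)
    end = datetime.date(year_hi, 12, 31)
    return start + datetime.timedelta(days=random.randrange((end - start).days + 1))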
|
[
"def generate_selected_dates(year_from=2000, year_to=2020, doy_start=1, doy_end=-1):\n import calendar, time\n dates = []\n for year in range(year_from, year_to+1):\n if doy_end == -1:\n if calendar.isleap(year):\n end_day = 367\n else:\n end_day = 366\n else:\n end_day = doy_end\n dates_this_yr = [time.strftime(\"%Y.%m.%d\", time.strptime(\"%d/%d\" % (i, year),\n \"%j/%Y\")) for i in\n range(doy_start, end_day)]\n dates.extend(dates_this_yr)\n return dates",
"def get_all_years(self, ) -> 'iterable[datetime.datetime]':\n # define earliest and latest years of entries\n start_year = self.query_all_entries().order_by(\n JournalEntry.create_date).first()\n end_year = self.query_all_entries().order_by(\n JournalEntry.create_date.desc()).first()\n if start_year and end_year:\n for y in range(start_year.create_date.year,\n end_year.create_date.year + 1):\n # find any entry within this year but before next year\n found = self.query_all_entries().filter(\n JournalEntry.create_date >= datetime.datetime(\n y, 1, 1, 0, 0)).filter(\n JournalEntry.create_date < datetime.datetime(\n y + 1, 1, 1, 0, 0)).first()\n # only yield this year if has an entry\n if found:\n yield datetime.datetime(y, 1, 1, 0, 0)",
"def pick_birthdate():\n year = randrange(1916, 2017)\n month = randrange(12)\n if isleap(year) and month == 1:\n day = randrange(1, 30)\n elif month == 1:\n day = randrange(1, 29)\n elif month == 3 or month == 5 or month == 8 or month == 10:\n day = randrange(1, 31)\n else:\n day = randrange(1, 32)\n return [str(day), str(month), str(year)]",
"def generate_dates():\n current_year = datetime.now().year\n current_date = datetime.now().strftime('%m%d')\n years = range(2015, current_year)\n quarters = [\"0331\", \"0630\", \"0930\", \"1231\"]\n all_dates = []\n for r in itertools.product(years, quarters):\n all_dates.append(str(r[0]) + r[1])\n for q in quarters:\n if q < current_date:\n all_dates.append(str(current_year) + q)\n logging.info(\"dates %s\", all_dates)\n return all_dates",
"def generate_dates():\n available_dates = []\n # For loop generates days 21-25\n for i in range(21, 26):\n # All generated dates will be in January 2019\n available_dates.append(date.Date(1, i, 2019))\n return available_dates",
"def get_years(self):\n \n items = []\n\n # Get a datetime object\n now = datetime.datetime.now()\n currentYear = now.year\n startYear = 1950\n \n for eachYear in range(startYear, (currentYear + 5), 1):\n items.append( str(eachYear) ) \n\n return items",
"def random_birthday(n):\n birthdays = []\n for i in range(n):\n bd = random.randint(1, 365)\n birthdays.append(bd)\n\n return birthdays",
"def construct_date_list():\n start = datetime.datetime.strptime(\"1989-04-16\", \"%Y-%m-%d\")\n end = datetime.datetime.strptime(datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d\"), \"%Y-%m-%d\")\n \n date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days + 1)]\n \n return date_generated",
"def iterate_liturgical_year(year):\n date = liturgical_year_start(year)\n while date <= liturgical_year_end(year):\n yield date\n\n date += dt.timedelta(1)",
"def birthday_in_gregorian(self, gregorian_year):\n jan1 = GregorianDate.new_year(gregorian_year)\n y = HebrewDate.from_fixed(jan1).year\n date1 = self.birthday(y)\n date2 = self.birthday(y + 1)\n return list_range([date1, date2], GregorianDate.year_range(gregorian_year))",
"def get_prime_birthdays(year, month, day):\n\n start_year = date.today().year\n\n while True:\n try:\n birthday = date(start_year, month, day)\n if is_day_prime(birthday):\n yield 'Your {} birthday is a prime year!'.format(start_year - year)\n start_year += 1\n except ValueError:\n print('Cannot exceed year 9999')\n break",
"def get_years_list_choice(self):\n return range(1950, 1998)",
"def __get_years_(search_year, start, step) -> list:\n sql_request = _sql_request_search_years(search_year)\n years = get_ids_by_request(sql_request, start, step)\n return years",
"def gen_modelled_date(start_date, end_date):\n # 2012, 2013, 2014\n year_model = [1, 2, 4]\n year_model = reduce(lambda x, y: x+y, [[year]*freq for year, freq in\n zip(range(2012, 2015), year_model)])\n rand_year = random.choice(year_model)\n\n\n # J F M A M J J A S O N D\n month_model = [1, 4, 8, 9, 7, 5, 4, 6, 8, 12, 10, 6]\n month_model = reduce(lambda x, y: x+y, [[month]*freq for month, freq in\n zip(range(1, 13), month_model)])\n rand_month = random.choice(month_model)\n\n week_dict = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: []} \t\n num_days_in_month = monthrange(rand_year, rand_month)[1]\n\n for day in range(1, num_days_in_month+1):\n week_dict[datetime.date(rand_year, rand_month, day).weekday()] += [day] \n \n\n # M T W R F S S\n week_model = [2, 1, 1, 2, 4, 8, 3]\n week_model = reduce(lambda x, y: x+y, [[week]*freq for week, freq in\n zip(range(7), week_model)])\n rand_day = random.choice(week_dict[random.choice(week_model)])\n\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\n # 21 22 23\n hour_model = [1, 1, 1, 1, 1, 1, 2, 9, 7, 5, 2, 1, 1, 2, 2, 3, 4, 14,\n 10, 8, 6, 3, 1, 1]\n hour_model = reduce(lambda x, y: x+y, [[hour]*freq for hour, freq in\n zip(range(24), hour_model)])\n rand_hour = random.choice(hour_model)\n \n rand_minute = random.choice(range(60))\n\n rand_second = random.choice(range(60))\n \n random_timestamp_arr = [rand_year, rand_month, rand_day, rand_hour,\n rand_minute, rand_second]\n return random_timestamp_arr",
"def date_generator(start, numdays):\n date_list = [start + datetime.timedelta(days=x) for x in range(numdays)]\n return date_list",
"def genAge(self):\n date = self.dataHandler.getRandomDate()\n self.identity.birthYear = date.year\n self.identity.age = datetime.datetime.now().year - self.identity.birthYear\n self.identity.birthday = f\"{date.day}.{date.month}\"",
"def get_genres_year(year) -> list:\n sql_request = sql_request_genres_year(year)\n\n sql_data = get_data_from_db(sql_request)\n genres = create_data_of_year(sql_data)\n return genres",
"def of_year(cls, year):\n start = datetime(year, 1, 1)\n start_quarter = list(\n rrule(MONTHLY, interval=3, dtstart=start, count=4)\n )\n end_quarter = [\n date + relativedelta(months=3, days=-1) for date in start_quarter\n ]\n return [cls(*item) for item in list(zip(start_quarter, end_quarter))]",
"def _date_range(start_date, end_date):\n\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a random set of net worth, income and liquid asset data.
|
def gen_financials():
    net_worth = random.randrange(5, 33) * 10000           # $50k - $320k
    liquid_assets = net_worth / random.randrange(1, 10)   # a fraction of net worth
    annual_income = random.randrange(7, 42) * 5000        # $35k - $205k
    financials = {'net_worth' : net_worth, 'liquid_assets' : liquid_assets,
                  'annual_income' : annual_income}
    return financials
|
[
"def generate(cls):\n account_id = random.randint(0, 10)\n amount = random.randint(0, 20000)\n auction_id = random.randint(0, 20)\n time_unit = random.randint(0, 100)\n return cls(account_id=account_id, amount=amount, auction_id=auction_id, time_unit=time_unit)",
"def generate_random_data(N, x_1_range, x_2_range):\n\n x_1s = uniform(x_1_range[0], x_1_range[1], N)\n x_2s = uniform(x_2_range[0], x_2_range[1], N)\n\n return x_1s, x_2s",
"def gen_inputs():\n #generate dates\n d0 = date(1970, 1, 1)\n days = random.randint(1, 20000)\n val_date = d0 + timedelta(days=days)\n days = random.randint(1, 10)\n settle_date = val_date + timedelta(days=days)\n days = random.randint(1, 5000)\n exercise_date = settle_date + timedelta(days=days)\n \n #generate stock, strike, vol\n stock = random.uniform(0, 1000)\n strike = random.uniform(0, 1000)\n vol = random.uniform(0, 2)\n \n put_call = \"put\"\n risk_free = random.uniform(0, 1.0)\n dividend = random.uniform(0, 1.0)\n method = \"PDE\"\n time_steps = 800\n grid_points = 800\n return {\"ValDate\": val_date, \n \"SettleDate\": settle_date,\n \"ExerciseDate\": exercise_date,\n \"Stock\": stock,\n \"Strike\": strike,\n \"Vol\": vol,\n \"PutCall\": put_call,\n \"RiskFreeRate\": risk_free,\n \"Dividend\": dividend,\n \"Method\": method,\n \"TimeSteps\": time_steps,\n \"GridPoints\": grid_points\n }",
"def generate_random_data():\n\n src_ip = ParamVector.random_ip()\n dst_ip = ParamVector.random_ip()\n src_port = ParamVector.random_port()\n dst_port = ParamVector.random_port()\n size_low = randint(ParamVector.SIZE_RANDOM_LOW_MIN, ParamVector.SIZE_RANDOM_LOW_MAX)\n size_high = size_low + randint(ParamVector.SIZE_RANDOM_HIGH_MIN, ParamVector.SIZE_RANDOM_HIGH_MAX)\n ttl = randint(ParamVector.TTL_THRESH_MIN, ParamVector.TTL_THRESH_MAX)\n protoc = get_random_protocol()\n seq_low = randint(ParamVector.SEQ_THRESH_MIN, ParamVector.SEQ_THRESH_MAX)\n seq_high = seq_low + randint(ParamVector.SEQ_THRESH_MIN, ParamVector.SEQ_THRESH_MAX)\n\n weight_func = lambda: uniform(0, ParamVector.WEIGHT_MAX_VAL)\n\n weights = {ParamVector.DST_IP: weight_func(),\n ParamVector.SRC_IP: weight_func(),\n ParamVector.DST_PORT: weight_func(),\n ParamVector.SRC_PORT: weight_func(),\n ParamVector.SIZE: weight_func(),\n ParamVector.TTL: weight_func(),\n ParamVector.PROTOCOL: weight_func(),\n ParamVector.SEQ: weight_func()}\n\n sum_weights = sum(weights.values())\n\n # normalizing the values:\n\n for key in weights.keys():\n weights[key] = weights[key] / sum_weights\n return ParamVector(ip_src_set={src_ip},\n ip_dst_set={dst_ip},\n port_src_set={src_port},\n port_dst_set={dst_port},\n sizes_lower_bound=size_low,\n sizes_upper_bound=size_high,\n ttl_lower_bound=ttl,\n protocol_set={protoc},\n seq_lower_bound=seq_low,\n seq_upper_bound=seq_high,\n weight_of=weights,\n malicious_threshold=random()\n )",
"def generate_cauctions(random=np.random.RandomState(0), filename=None, n_items=100, n_bids=500, min_value=1, max_value=100,\n value_deviation=0.5, add_item_prob=0.9, max_n_sub_bids=5,\n additivity=0.2, budget_factor=1.5, resale_factor=0.5,\n integers=False, warnings=False):\n\n assert min_value >= 0 and max_value >= min_value\n assert add_item_prob >= 0 and add_item_prob <= 1\n\n def choose_next_item(bundle_mask, interests, compats, add_item_prob, random):\n n_items = len(interests)\n prob = (1 - bundle_mask) * interests * compats[bundle_mask, :].mean(axis=0)\n prob /= prob.sum()\n return random.choice(n_items, p=prob)\n\n # common item values (resale price)\n values = min_value + (max_value - min_value) * random.rand(n_items)\n\n # item compatibilities\n compats = np.triu(random.rand(n_items, n_items), k=1)\n compats = compats + compats.transpose()\n compats = compats / compats.sum(1)\n\n bids = []\n n_dummy_items = 0\n\n # create bids, one bidder at a time\n while len(bids) < n_bids:\n\n # bidder item values (buy price) and interests\n private_interests = random.rand(n_items)\n private_values = values + max_value * value_deviation * (2 * private_interests - 1)\n\n # substitutable bids of this bidder\n bidder_bids = {}\n\n # generate initial bundle, choose first item according to bidder interests\n prob = private_interests / private_interests.sum()\n item = random.choice(n_items, p=prob)\n bundle_mask = np.full(n_items, 0)\n bundle_mask[item] = 1\n\n # add additional items, according to bidder interests and item compatibilities\n while random.rand() < add_item_prob:\n # stop when bundle full (no item left)\n if bundle_mask.sum() == n_items:\n break\n item = choose_next_item(bundle_mask, private_interests, compats, add_item_prob, random)\n bundle_mask[item] = 1\n\n bundle = np.nonzero(bundle_mask)[0]\n\n # compute bundle price with value additivity\n price = private_values[bundle].sum() + np.power(len(bundle), 1 + additivity)\n if integers:\n price = int(price)\n\n # drop negativaly priced bundles\n if price < 0:\n if warnings:\n print(\"warning: negatively priced bundle avoided\")\n continue\n\n # bid on initial bundle\n bidder_bids[frozenset(bundle)] = price\n\n # generate candidates substitutable bundles\n sub_candidates = []\n for item in bundle:\n\n # at least one item must be shared with initial bundle\n bundle_mask = np.full(n_items, 0)\n bundle_mask[item] = 1\n\n # add additional items, according to bidder interests and item compatibilities\n while bundle_mask.sum() < len(bundle):\n item = choose_next_item(bundle_mask, private_interests, compats, add_item_prob, random)\n bundle_mask[item] = 1\n\n sub_bundle = np.nonzero(bundle_mask)[0]\n\n # compute bundle price with value additivity\n sub_price = private_values[sub_bundle].sum() + np.power(len(sub_bundle), 1 + additivity)\n if integers:\n sub_price = int(sub_price)\n\n sub_candidates.append((sub_bundle, sub_price))\n\n # filter valid candidates, higher priced candidates first\n budget = budget_factor * price\n min_resale_value = resale_factor * values[bundle].sum()\n for bundle, price in [\n sub_candidates[i] for i in np.argsort([-price for bundle, price in sub_candidates])]:\n\n if len(bidder_bids) >= max_n_sub_bids + 1 or len(bids) + len(bidder_bids) >= n_bids:\n break\n\n if price < 0:\n if warnings:\n print(\"warning: negatively priced substitutable bundle avoided\")\n continue\n\n if price > budget:\n if warnings:\n print(\"warning: over priced substitutable bundle avoided\")\n continue\n\n if values[bundle].sum() < 
min_resale_value:\n if warnings:\n print(\"warning: substitutable bundle below min resale value avoided\")\n continue\n\n if frozenset(bundle) in bidder_bids:\n if warnings:\n print(\"warning: duplicated substitutable bundle avoided\")\n continue\n\n bidder_bids[frozenset(bundle)] = price\n\n # add XOR constraint if needed (dummy item)\n if len(bidder_bids) > 2:\n dummy_item = [n_items + n_dummy_items]\n n_dummy_items += 1\n else:\n dummy_item = []\n\n # place bids\n for bundle, price in bidder_bids.items():\n bids.append((list(bundle) + dummy_item, price))\n\n # generate the LP file\n if filename is not None:\n with open(filename, 'w') as file:\n bids_per_item = [[] for item in range(n_items + n_dummy_items)]\n\n file.write(\"maximize\\nOBJ:\")\n for i, bid in enumerate(bids):\n bundle, price = bid\n file.write(f\" +{price} x{i+1}\")\n for item in bundle:\n bids_per_item[item].append(i)\n\n file.write(\"\\n\\nsubject to\\n\")\n for item_bids in bids_per_item:\n if item_bids:\n for i in item_bids:\n file.write(f\" +1 x{i+1}\")\n file.write(f\" <= 1\\n\")\n\n file.write(\"\\nbinary\\n\")\n for i in range(len(bids)):\n file.write(f\" x{i+1}\")\n\n c = np.array([bid[1] for i, bid in enumerate(bids)])\n\n A_row_indices = []\n A_col_indices = []\n bids_per_item = [[] for item in range(n_items + n_dummy_items)]\n for i, bid in enumerate(bids):\n bundle, price = bid\n for item in bundle:\n bids_per_item[item].append(i)\n\n k = 0\n for item_bids in bids_per_item:\n if item_bids:\n for i in item_bids:\n A_row_indices.append(k)\n A_col_indices.append(i)\n k += 1\n\n A = scipy.sparse.csr_matrix((np.array([1 for _ in A_row_indices]),\n (A_row_indices, A_col_indices))).todense()\n \n A = np.array(A)\n b = np.ones(A.shape[0])\n\n return A, b, c",
"def create_random_data(demand_count, supply_count, seed = 2):\n np.random.RandomState(seed) #TODO not sure if this is the right way to do it. Read documentation\n demand = pd.DataFrame()\n supply = pd.DataFrame()\n\n # create element lenghts\n demand['Length'] = ((MAX_LENGTH/2 + 1) - MIN_LENGTH) * np.random.random_sample(size = demand_count) + MIN_LENGTH\n supply['Length'] = ((MAX_LENGTH + 1) - MIN_LENGTH) * np.random.random_sample(size = supply_count) + MIN_LENGTH\n\n # create element areas independent of the length. Can change this back to Artur's method later, but I want to see the effect of even more randomness. \n demand['Area'] = ((MAX_AREA + .001) - MIN_AREA) * np.random.random_sample(size = demand_count) + MIN_AREA\n supply['Area'] = ((MAX_AREA + .001) - MIN_AREA) * np.random.random_sample(size = supply_count) + MIN_AREA\n\n # constraints\n #demand['Area'] = np.full((demand_count,), MIN_AREA)\n #supply['Area'] = np.full((supply_count,), MIN_AREA)\n\n\n # intertia moment\n demand['Inertia_moment'] = demand.apply(lambda row: row['Area']**(2)/12, axis=1) # derived from area assuming square section\n supply['Inertia_moment'] = supply.apply(lambda row: row['Area']**(2)/12, axis=1) # derived from area assuming square section\n\n # height - assuming square cross sections\n demand['Height'] = np.power(demand['Area'], 0.5)\n supply['Height'] = np.power(supply['Area'], 0.5)\n\n supply['Is_new'] = False\n \n # Change index names\n demand.index = map(lambda text: 'D' + str(text), demand.index)\n supply.index = map(lambda text: 'R' + str(text), supply.index)\n \n return demand.round(2), supply.round(2)",
"def gen_stimuli(random_input):\n model = GoldenModel(CONFIG_FILENAME, NET_FILENAME, clip_balanced=False)\n if random_input:\n x = np.random.randint(-60, 60, (model.C, model.T))\n else:\n x = np.load(INPUT_FILENAME)[\"input\"][0, :, :]\n x = F.quantize_to_int(x, model.input_scale)\n y_exp = model.layers[0](x)\n return x, y_exp",
"def generate_data_client(min=1000, max=1100):\n percentage = np.random.uniform()\n client_id = np.random.randint(min, max)\n return {\n \"clientid\": f\"{client_id}\".zfill(10),\n \"pageGender\": random.choices(['M', 'F'], [percentage, 1 - percentage])[0],\n #\"timestamp\": str(datetime.datetime.now())\n }",
"def fake_vacancies_data(faker):\n def gen_vacancies(sources_count=1, vacancies_count=3):\n vacancies_data = []\n for s in range(sources_count):\n source_name = faker.company()\n for v in range(vacancies_count):\n vacancies_data.append({\n 'source': faker.uri(),\n 'source_name': source_name[:16],\n 'name': faker.job()\n })\n return vacancies_data\n return gen_vacancies",
"def generate_random_data():\n\n def _generate_random_data(num_files=100, min_size=0, max_size=1000):\n \"\"\"Generate a number of byte strings with random content (binary) and random length (in a given range).\n\n :param num_files: the number of files to generate\n :param min_size: the smallest allowed file size\n :param max_size: the smallest allowed file size\n :return: a dictionary where the key is the data MD5 and the value is the bytes content\n \"\"\"\n files = {}\n for _ in range(num_files):\n size = random.randint(min_size, max_size)\n content = bytearray(random.getrandbits(8) for _ in range(size))\n md5 = hashlib.md5(content).hexdigest()\n files[md5] = content\n return files\n\n yield _generate_random_data",
"def generatetestshare(self, issuerID=None, fullname=None, shortname=None,\n abbrevname=None, description=None,\n industrysector=None, currentprice=None,\n marketcapitalisation=None, sharecount=None,\n daychangepercent=None, daychangeprice=None,\n daypricehigh=None, daypricelow=None, dayvolume=None):\n # Generate values for each attribute if one is not assigned.\n if not issuerID:\n issuerID = ''.join(random.choices(string.ascii_uppercase, k=3))\n if not fullname:\n fullname = ''.join(random.choices(string.ascii_lowercase, k=10))\n if not shortname:\n shortname = ''.join(random.choices(string.ascii_lowercase, k=10))\n if not abbrevname:\n abbrevname = ''.join(random.choices(string.ascii_lowercase, k=10))\n if not description:\n description = ''.join(random.choices(string.ascii_lowercase, k=50))\n if not industrysector:\n industrysector = ''.join(random.choices(\n string.ascii_lowercase, k=10))\n if not currentprice:\n currentprice = round(random.uniform(1.0, 1000.0), 2)\n if not marketcapitalisation:\n marketcapitalisation = random.randint(1000000, 1000000000)\n if not sharecount:\n sharecount = random.randint(1000000, 1000000000)\n if not daychangepercent:\n daychangepercent = round(random.uniform(-1.0, 1.0), 2)\n if not daychangeprice:\n daychangeprice = round(random.uniform(1.0, 100.0), 2)\n if not daypricehigh:\n daypricehigh = round(random.uniform(currentprice, 1000.0), 2)\n if not daypricelow:\n daypricelow = round(random.uniform(1.0, currentprice), 2)\n if not dayvolume:\n dayvolume = random.randint(1000, 10000000)\n # Create share\n share = Share(\n issuerID=issuerID,\n fullname=fullname,\n abbrevname=abbrevname,\n shortname=shortname,\n description=description,\n industrysector=industrysector,\n currentprice=currentprice,\n marketcapitalisation=marketcapitalisation,\n sharecount=sharecount,\n daychangepercent=daychangepercent,\n daychangeprice=daychangeprice,\n daypricehigh=daypricehigh,\n daypricelow=daypricelow,\n dayvolume=dayvolume\n )\n # Return generated share\n return share",
"def generate_demand(self,num_points):\n\n latitude,longitude = self.random_point_generator(num_points)\n demand = np.array([np.random.randint(10,100) for observation in range(num_points)])\n\n\n return latitude, longitude, demand",
"def test_random(self):\n\t\tfor _ in range(1000):\n\t\t\tself.assertReadData(rnd.randrange(0, len(self.basis)))",
"def get_random_hydrated_basins(n, cfg=CFG, rng=None):\n rng = _rng(rng)\n rand_arr = rng.random(size=n)\n # Fraction of ctype hydrated basin impactors\n if cfg.impact_ice_comets:\n ast_frac = 1 - cfg.comet_ast_frac\n hydration_prob = ast_frac * cfg.ctype_frac * cfg.ctype_hydrated\n else:\n hydration_prob = cfg.ctype_frac * cfg.ctype_hydrated\n ctype_inds = rand_arr < hydration_prob\n\n # Fraction of cometary basins (use same rand_arr and pick unique inds)\n hydration_prob = cfg.comet_ast_frac\n comet_inds = rand_arr > (1 - hydration_prob)\n return ctype_inds, comet_inds",
"def block(max_number_of_txns, exp_time):\n blk = {'transactions':[transaction(randrange(2, max_txt_length)) for i in range(randrange(1, max_number_of_txns))], 'time':exp_time}\n return blk",
"def gen_vars_uniform(nsets, min_, max_, n, round_to_int=False):\r\n periods = np.random.uniform(low=min_, high=max_, size=(nsets, n))\r\n\r\n if round_to_int:\r\n return np.rint(periods).tolist()\r\n else:\r\n return periods.tolist()",
"def generate_data(self) -> None:\n self.data = [np.concatenate([np.expand_dims(np.random.randint(2, size=self.bandit.n_features), axis=1)\n for i in np.arange(self.n_rounds)], axis=1)\n for j in np.arange(self.num_sims)]",
"def _generate_random_unitaries(self):\n\n n = number_of_qubits(self._qp)\n for i in range(self._samples):\n unitaries_list = []\n for j in range(n):\n u = unitary_group.rvs(2)\n alpha, theta, phi, lam = decompose_yzy(u)\n unitaries_list.append([theta, phi, lam])\n self._unitaries.update({'sample_{}'.format(i): unitaries_list})",
"def rand_inst(self):\n inst = [0.0] * self.food_count\n\n entries = self.rnd_entries()\n if entries < 1:\n entries = RND_EXP_ENTRIES # extra boost to our average\n\n idxs = set([randrange(len(inst))])\n while len(idxs) < entries:\n idxs.add(randrange(len(inst)))\n\n for idx in idxs:\n inst[idx] = choice([0.5, 1.0, 1.5, 2.0])\n\n return inst"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a random phone number.
|
def gen_phone_number():
area_code = random.randrange(100, 799)
phone_1 = random.randrange(100, 999)
phone_2 = random.randrange(1000, 9999)
return str(area_code) + str(phone_1) + str(phone_2)
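A minimal usage sketch, assuming `random` has been imported at module level and `gen_phone_number` is in scope; the dash-formatted wrapper is purely illustrative and not part of the original function.

# Illustrative only: call the generator and add conventional dashes.
raw = gen_phone_number()                      # e.g. "7125551234" (10 digits)
formatted = "-".join([raw[:3], raw[3:6], raw[6:]])
print(formatted)                              # e.g. "712-555-1234"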
|
[
"def random_phone_generator():\n first = str(randint(100, 999))\n second = str(randint(1, 888)).zfill(3)\n last = (str(randint(1, 9998)).zfill(4))\n while last in ['1111', '2222', '3333', '4444', '5555', '6666', '7777', '8888']:\n last = (str(randint(1, 9998)).zfill(4))\n \n return '{}-{}-{}'.format(first, second, last)",
"def random_number():\n return random.getrandbits(32)",
"def generate_otp(digit):\n lower = 10 ** (digit - 1)\n upper = 10 ** digit - 1\n return random.randint(lower, upper)",
"def get_number(self):\n return self.random_number",
"def get_random_number():\n\n return random.randint(0, 100000)",
"def generate_number_to_be_guessed():\n\n digits = [str(digit) for digit in range(10)]\n random.shuffle(digits)\n\n return ''.join(digits[:3])",
"def generate_phone_numbers(n, with_replacement=True):\n numbers = []\n for i in range(n):\n num = _get_random_phone()\n while not with_replacement and num in numbers:\n num = _get_random_phone()\n numbers.append(num)\n return numbers",
"def fake_phone_number(format=\"999-999-9999\"):\n number = []\n for char in format:\n if char == \"9\":\n n = next(_number_generator)\n if not number:\n # do not start phone numbers with zero\n while n == \"0\":\n n = next(_number_generator)\n number.append(n)\n else:\n number.append(char)\n return \"\".join(number)",
"def _generate_random() -> int:\n import pyotp # pylint: disable=import-outside-toplevel\n\n return int(pyotp.random_base32(length=32, chars=list(\"1234567890\")))",
"def set_number(self):\n self._number = range(1000, 9999)\n self.random_number = random.choice(self._number)\n return str(self.random_number)",
"def get_random_sensor_id():\n return \"\".join(random.choice(\"0123456789abcdef\") for i in range(12))",
"def generate_code():\n digits = list(range(10))\n random.shuffle(digits[:3])\n print(digits[:3])\n \n return digits",
"def get_random_values():\n digits = list(range(10))\n random.shuffle(digits)\n digits = [str(digit) for digit in digits[:3]]\n return \"\".join(digits)",
"def get_phone_num():\n # Init\n valid_ph_num = False\n empty_str = ''\n # Prompt for phone number\n while not valid_ph_num:\n phone_num = input('Enter phone numbr (xxx-xxx-xxxx): ')\n # check if valid form\n if len(phone_num) != 12 or phone_num[3] != '-' or phone_num[7] != '-':\n print('Invalid Entry - Must be of the form xxx-xxx-xxxx\\n')\n else:\n # Check for non-digis\n digit_index = 0\n valid_ph_num = True\n phone_num_digits = phone_num.replace('-', empty_str)\n \n while valid_ph_num and digit_index < len(phone_num_digits):\n if not phone_num_digits[digit_index].isdigit():\n print('* Non-digit: {0} *\\n'.format(phone_num_digits[digit_index]))\n valid_ph_num = False\n else:\n digit_index += 1\n return phone_num",
"def gen_credit_card_number():\n return random.choice(CC_TYPES_ACTIVE)()",
"def generate_ping():\n pin = randint(0, 9999999)\n test = pin\n digits = 0\n while test > 0:\n test = test/10\n digits += 1\n\n if 7-digits != 0:\n pin = ('0'*(7-digits))+str(pin)\n \n return pin",
"def get_phone_number(entity: str) -> str:\n type_, uuid = entity.split(\":\")\n model = MODEL_MAPPING.get(type_)\n if not model:\n return\n return model.user.phone_number",
"def get_rand_ip():\n return socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))",
"def paymentcard_digit_gen():\n return uuid.uuid4().hex[:10]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a random bank account number between 9 and 14 characters in length. Some values have an appended text string.
|
def gen_bank_account(numeric_only=False):
    num_len = random.randrange(7, 12)
    upper_range = int(math.pow(10, num_len)-1)
    account_number = random.randrange(1, upper_range)
    if not numeric_only:
        first_letter_seed = 22  # the percentage of account numbers with 1-2 initial letters.
        account_number_seed = random.randrange(0, 99)
        if account_number_seed <= first_letter_seed:
            account_number = 'AB' + str(account_number)
    return str(account_number)
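A short usage sketch, assuming `random` and `math` are imported at module level; the printed values are illustrative.

# Illustrative only: a purely numeric account number, then the default mode
# which occasionally prepends the 'AB' marker.
print(gen_bank_account(numeric_only=True))    # e.g. "4829103"
print(gen_bank_account())                     # e.g. "AB4829103" (roughly a fifth of the time)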
|
[
"def generation_account_number():\n return random.randrange(1111111111, 9999999999)",
"def paymentcard_digit_gen():\n return uuid.uuid4().hex[:10]",
"def card_digit_gen ():\n return uuid.uuid4().hex[:8]",
"def gen_credit_card_number():\n return random.choice(CC_TYPES_ACTIVE)()",
"def randstring():\n return binascii.b2a_hex(os.urandom(15)).upper()",
"def generate_id():\n length = 6\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))",
"def generate_random_alphanumeric_string(self, length=5):\n return ''.join(random.choice('0123456789ABCDEF')\n for i in range(length))",
"def generate_number_to_be_guessed():\n\n digits = [str(digit) for digit in range(10)]\n random.shuffle(digits)\n\n return ''.join(digits[:3])",
"def _generate_code(self) -> str:\n letters = string.ascii_letters + string.digits\n return \"\".join(random.choice(letters) for i in range(10)) # nosec",
"def rnd_string(n_bytes):\n return ''.join(\n random.choice(string.ascii_letters + string.digits)\n for _ in range(n_bytes))",
"def get_random_values():\n digits = list(range(10))\n random.shuffle(digits)\n digits = [str(digit) for digit in digits[:3]]\n return \"\".join(digits)",
"def generate_random_id() -> str:\n random_id = \"\"\n characters = list('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n for _ in range(12):\n random_id += random.choice(characters)\n return random_id",
"def random_string():\n rs = (''.join(random.choice(string.ascii_uppercase)\n for i in range(16)))\n\n return rs",
"def randString(self, alphabet='ascii', minLen=0, maxLen=1032):\r\n\t\tchars = self.Alphabets[alphabet]\r\n\t\tstring = \"\"\r\n\t\tfor x in range(0, random.randint(minLen, maxLen)):\r\n\t\t\tstring += chars[random.randint(0, len(chars)-1)]\r\n\t\treturn string",
"def get_login():\n length = randint(6, 10)\n chars = string.ascii_uppercase\n return \"\".join(choice(chars) for _ in range(length))",
"def generate_ballot_number() -> str:\r\n # TODO: Implement this! Feel free to add parameters to this method, if necessary\r\n raise NotImplementedError()",
"def generate_user_id(num_char):\n letters = string.ascii_lowercase + string.ascii_uppercase + string.digits\n return ''.join(random.choice(letters) for i in range(num_char))",
"def get_random_string(length=12, allowed_chars=CHARS):\n return ''.join(random.choice(allowed_chars) for _ in range(length))",
"def gen_phone_number():\n area_code = random.randrange(100, 799)\n phone_1 = random.randrange(100, 999)\n phone_2 = random.randrange(1000, 9999)\n return str(area_code) + str(phone_1) + str(phone_2)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a random 16-digit numeric value, common to Visa-style credit card numbers.
|
def gen_credit_card_number():
return random.choice(CC_TYPES_ACTIVE)()
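The function above dispatches to a randomly chosen generator from a `CC_TYPES_ACTIVE` collection that is not shown here. A minimal sketch of what such a collection might look like, with a hypothetical Visa-like generator; the leading 4 and the plain digit fill are assumptions, and no Luhn checksum is computed.

import random

def _gen_visa_like():
    # Hypothetical stand-in: 16 digits starting with 4, no checksum.
    return "4" + "".join(str(random.randint(0, 9)) for _ in range(15))

CC_TYPES_ACTIVE = [_gen_visa_like]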
|
[
"def rand16(self):\n \n data=self.EZSPtrans([0x49]);\n if data==None:\n print \"Insufficient random data.\";\n return 0;\n return ord(data[6])+(ord(data[7])<<8);",
"def card_digit_gen ():\n return uuid.uuid4().hex[:8]",
"def paymentcard_digit_gen():\n return uuid.uuid4().hex[:10]",
"def random_number():\n return random.getrandbits(32)",
"def get_hexadecimal_random_number():\n return \"\".join([random.choice('0123456789abcdef') for _ in range(16)])",
"def generation_account_number():\n return random.randrange(1111111111, 9999999999)",
"def generate_hex_int():\n random_num = random.randint(0, 16777215)\n hex_num = hex(random_num)\n return int(hex_num, 16)",
"def rand():\r\n global rand_seed\r\n rand_seed = (MULTIPLIER * rand_seed + INCREMENT)\r\n return (rand_seed >> 16) & 0x7FFF",
"def d12():\n\treturn random.randint(1, 12)",
"def random_vin():\n chars = string.ascii_uppercase + \\\n string.ascii_lowercase + \\\n string.digits\n return ''.join(random.choice(chars) for _ in range(17))",
"def _random_hex(digits=8):\n from string import hexdigits\n from random import choice\n\n return \"\".join(choice(hexdigits) for _ in range(digits))",
"def get_random_values():\n digits = list(range(10))\n random.shuffle(digits)\n digits = [str(digit) for digit in digits[:3]]\n return \"\".join(digits)",
"def _generate_random() -> int:\n import pyotp # pylint: disable=import-outside-toplevel\n\n return int(pyotp.random_base32(length=32, chars=list(\"1234567890\")))",
"def random_data():\n return binascii.b2a_hex(os.urandom(31)).decode('utf-8')",
"def _generate_nonce(n):\n\n return base64.b16encode(os.urandom(n/8))",
"def get_random_number():\n\n return random.randint(0, 100000)",
"def random_number(bits):\n max, min = 2**bits - 1, 2**(bits - 1)\n return random.randint(min, max)",
"def get_random(self):\n return u\"%08X\" % self.random.getrandbits(32)",
"def generate_otp(digit):\n lower = 10 ** (digit - 1)\n upper = 10 ** digit - 1\n return random.randint(lower, upper)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
iterative_levenshtein(s, t) -> ldist, where ldist is the Levenshtein distance between the strings s and t. For all i and j, dist[i,j] will contain the Levenshtein distance between the first i characters of s and the first j characters of t.
|
def iterative_levenshtein(s, t):
    rows = len(s)+1
    cols = len(t)+1
    dist = [[0 for x in range(cols)] for x in range(rows)]
    # source prefixes can be transformed into empty strings
    # by deletions:
    for i in range(1, rows):
        dist[i][0] = i
    # target prefixes can be created from an empty source string
    # by inserting the characters
    for i in range(1, cols):
        dist[0][i] = i
    for col in range(1, cols):
        for row in range(1, rows):
            if s[row-1] == t[col-1]:
                cost = 0
            else:
                cost = 1
            deletion = dist[row-1][col] + 1
            insertion = dist[row][col-1] + 1
            substitution = dist[row-1][col-1] + cost
            dist[row][col] = min(deletion, insertion, substitution)
    # Walk back through the matrix to recover the edit operations.
    # operationsdict is expected to be an existing (module-level) mapping
    # from operation descriptions to counts, e.g. a defaultdict(int).
    rowscount = rows-1
    colscount = cols-1
    while rowscount > 0 or colscount > 0:
        currdistance = dist[rowscount][colscount]
        # Guard against stepping off the matrix once one string is exhausted.
        deletion = dist[rowscount-1][colscount] if rowscount > 0 else float('inf')
        insertion = dist[rowscount][colscount-1] if colscount > 0 else float('inf')
        substitution = (dist[rowscount-1][colscount-1]
                        if rowscount > 0 and colscount > 0 else float('inf'))
        a = [substitution, deletion, insertion]
        mindist = min(a)
        argmin = a.index(mindist)
        if argmin == 1:
            rowscount -= 1
            if currdistance == mindist + 1:
                print("delete " + s[rowscount])
                operationsdict["delete " + s[rowscount]] += 1
        elif argmin == 2:
            colscount -= 1
            if currdistance == mindist + 1:
                print("insert " + t[colscount])
                operationsdict["insert " + t[colscount]] += 1
        else:
            colscount -= 1
            rowscount -= 1
            if currdistance == mindist + 1:
                print("substitute " + s[rowscount] + " for " + t[colscount])
                operationsdict["substitute " + s[rowscount] + " for " + t[colscount]] += 1
    return dist[rows-1][cols-1]
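A brief usage sketch, assuming `operationsdict` is a module-level `defaultdict(int)` as the function expects; the example strings are arbitrary.

from collections import defaultdict

operationsdict = defaultdict(int)
distance = iterative_levenshtein("flaw", "lawn")
print(distance)               # 2
print(dict(operationsdict))   # e.g. {'delete f': 1, 'insert n': 1}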
|
[
"def levenshtein_distance_using_lexical_tree(lexical_tree, input_string, strategy=0, case_sensitive=0):",
"def levenshtein_distance(str_1, str_2):\n return textdistance.levenshtein.normalized_similarity(str_1, str_2)",
"def rel_levenshtein(s1, s2):\n maxlen = max(len(s1), len(s2))\n if maxlen > 0:\n return levenshtein(s1, s2) / float(maxlen)\n else:\n return 0",
"def edit_levenshtein(c1, c2):\n return 0 if c1 == c2 else -1",
"def test_levenshteinDistance_bat_cat(self):\n distance = util.levenshteinDistance('bat', 'cat')\n self.assertEqual(distance, 1)",
"def levenshtein(string, candidates):\n\n distances = defaultdict(int)\n num_lines = len(string)\n\n for k, v in candidates.items():\n expanded = False\n # Expands the length of each candidate to match the length of the compared string\n if len(v) != len(string):\n v = (v * (num_lines // len(v) + 1))[:num_lines]\n expanded = True\n\n edit_distance = distance(string, v)\n\n # If we expanded the candidate, then it is a worse match than what we have already\n if edit_distance in distances and expanded:\n continue\n\n distances[distance(string, v)] = k\n\n return distances[min(distances)]",
"def test_levenshteinDistance_cat_cat(self):\n distance = util.levenshteinDistance('cat', 'cat')\n self.assertEqual(distance, 0)",
"def test_levenshteinDistance_bar_cat(self):\n distance = util.levenshteinDistance('bar', 'cat')\n self.assertEqual(distance, 2)",
"def levenshtein_distance(diffs):\n levenshtein = 0\n insertions = 0\n deletions = 0\n for (op, data) in diffs:\n if op == DIFF_INSERT:\n insertions += len(data)\n elif op == DIFF_DELETE:\n deletions += len(data)\n elif op == DIFF_EQUAL:\n # A deletion and an insertion is one substitution.\n levenshtein += max(insertions, deletions)\n insertions = 0\n deletions = 0\n levenshtein += max(insertions, deletions)\n return levenshtein",
"def Levenshtein_distance(a, b):\n if len(a) == 0:\n return len(b)\n elif len(b) == 0:\n return len(a)\n elif a[0] == b[0]:\n return Levenshtein_distance(a[1:], b[1:])\n else:\n return 1 + min(Levenshtein_distance(a[1:], b), Levenshtein_distance(a, b[1:]), Levenshtein_distance(a[1:], b[1:]))",
"def test_levenshteinDistance_bridgedb_doge(self):\n distance = util.levenshteinDistance('bridgedb', 'doge')\n self.assertEqual(distance, 6)",
"def levenshtein_distance(s1, s2, shortest=False):\n if shortest and len(s1) != len(s2):\n length = min(len(s1), len(s2))\n s1 = s1[:length]\n s2 = s2[:length]\n oneago = None\n thisrow = range(1, len(s2) + 1) + [0]\n for x in xrange(len(s1)):\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(s2) + [x + 1]\n for y in xrange(len(s2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (s1[x] != s2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n return thisrow[len(s2) - 1]",
"def test_levenshteinDistance_blank_blank(self):\n distance = util.levenshteinDistance('', '')\n self.assertEqual(distance, 0)",
"def _Levenshtein_find(s, V):\n d = np.zeros((len(V),))\n for (i,t) in enumerate(V):\n d[i] = _Levenshtein_distance(s,t)\n idx = np.argmin(d)\n return(V[idx], idx)",
"def test_levenshteinDistance_feidanchaoren0043_feidanchaoren0011(self):\n email1 = Address('feidanchaoren0043@gmail.com')\n email2 = Address('feidanchaoren0011@gmail.com')\n # Fuzzy match if the Levenshtein Distance is less than or equal to:\n fuzzyMatch = 4\n distance = util.levenshteinDistance(email1.local, email2.local)\n self.assertLessEqual(distance, fuzzyMatch)",
"def edit_distance(s1, s2):\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in xrange(-1, lenstr1 + 1):\n d[(i, -1)] = i + 1\n for j in xrange(-1, lenstr2 + 1):\n d[(-1, j)] = j + 1\n\n for i in xrange(lenstr1):\n for j in xrange(lenstr2):\n if s1[i] == s2[j]:\n cost = 0\n else:\n cost = 1\n d[(i, j)] = min(\n d[(i - 1, j)] + 1, # deletion\n d[(i, j - 1)] + 1, # insertion\n d[(i - 1, j - 1)] + cost, # substitution\n )\n if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:\n d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition\n\n return d[lenstr1 - 1, lenstr2 - 1]",
"def lev_distance(self,b):\n str1 = self.name\n str2 = b.name\n d=dict()\n for i in range(len(str1)+1):\n d[i]=dict()\n d[i][0]=i\n for i in range(len(str2)+1):\n d[0][i] = i\n for i in range(1, len(str1)+1):\n for j in range(1, len(str2)+1):\n d[i][j] = min(d[i][j-1]+1, d[i-1][j]+1, d[i-1][j-1]+(not str1[i-1] == str2[j-1]))\n return d[len(str1)][len(str2)]",
"def _match_term_to_results_with_levenshtein(self, current_search_term, ocr_results):\n possible_matches = []\n for result in ocr_results:\n ocr_result_word = result[0]\n distance = editdistance.eval(current_search_term, ocr_result_word)\n similarity = 1 - distance / max(len(ocr_result_word), len(current_search_term)) \n if similarity > self.minimum_word_similarity:\n possible_matches.append(result)\n\n return possible_matches",
"def levenshtein_distance(word, node_val):\n\n memo = {}\n\n # time for a closure!\n def memo_levenshtein_distance(word, i, node_val, j):\n if (word, i, node_val, j) in memo:\n return memo[(word, i, node_val, j)]\n\n if len(word) - i == 0:\n return len(node_val) - j\n if len(node_val) - j == 0:\n return len(word) - i\n if word[i] != node_val[j]:\n cost = 1\n else:\n cost = 0\n\n distance = min(memo_levenshtein_distance(word, i+1, node_val, j) + 1,\n memo_levenshtein_distance(word, i, node_val, j+1) + 1,\n memo_levenshtein_distance(word, i+1, node_val, j+1) + cost)\n\n memo[(word, i, node_val, j)] = distance\n return distance\n\n return memo_levenshtein_distance(word, 0, node_val, 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test fiducial averaging (preload_all = False).
|
def test_fiducials_10():
peaks = {"x" : numpy.array([1.0, 2.0, 3.0]),
"y" : numpy.array([1.0, 1.0, 1.0])}
filename = "test_fiducials.hdf5"
h5_name = storm_analysis.getPathOutputTest(filename)
storm_analysis.removeFile(h5_name)
# Write data.
with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
for i in range(3):
h5.addLocalizations(peaks, i)
h5.addMovieInformation(FakeReader(n_frames = 3))
# Track fiducials..
fiducials.trackFiducials(h5_name, radius = 0.1)
# Check
with fiducials.SAH5Fiducials(h5_name) as h5:
[ave, n] = h5.averageFiducials(fields = ["y"], preload_all = False)
assert(numpy.allclose(ave["y"], numpy.ones(3)))
|
[
"def test(model, loaders, avg=True, device='cpu', loss_func=None):\n model.eval()\n\n acc_arr = []\n loss_arr = []\n\n for loader in loaders:\n loss, acc = test_dataset(model, loader, device, loss_func)\n acc_arr.append(acc)\n loss_arr.append(loss.item())\n\n model.train()\n\n if avg:\n return 100 * np.mean(acc_arr), np.mean(loss_arr)\n else:\n return acc_arr, loss_arr",
"def get_mean_F1(model, validation_loader):\n model.eval()\n mean_f1 = 0\n for (data, target) in validation_loader:\n output = model(data)\n mean_f1 += f1_score(target.detach().cpu().numpy(), output_to_class(output), average='micro') / len(validation_loader)\n \n return mean_f1",
"def test_mean_infected(self):\n\n surviver = Surviver.objects.all()\n infected, not_infected = calc_mean_infected(surviver)\n self.assertEqual(infected, 50.0)\n self.assertEqual(not_infected, 50.0)",
"def test_basic_add(self):\n data_loader = self.get_data_loader()\n\n count = 0\n for data in data_loader:\n image, labels = data\n result = add_test_augmenter(count, image['rgb'])\n self.assertAlmostEqual(torch.mean(result[0].cpu()),\n torch.mean(image['rgb'][0][0]), places=5)\n\n count += 1",
"def train_and_test():\n\ttrain_data, test_data, test_users, test_movies = get_train_data()\n\tprint \"loaded train & test data\"\n\tcf = collaborative_filtering(train_data)\n\t# evaluate the collaborative filtering model by printing the rmse value for the test data\n\tprint cf.score(test_data)",
"def test_get_average_occurence_count_method():\n keywordsChief1 = KeywordsChief(\"test_data/keywords.yaml\")\n assert keywordsChief1.get_average_occurrence_count() == 1.0\n\n keywordsChief2 = KeywordsChief(\"test_data/keywords_ngram2.yaml\")\n assert keywordsChief2.get_average_occurrence_count() == 1.0\n\n keywordsChief3 = KeywordsChief(\"test_data/keywords_ngram3.yaml\")\n assert keywordsChief3.get_average_occurrence_count() == 1.0",
"def test_62_intensity_weighted_mean_spectral_analysis():\n\tcasalog.origin(\"test_62_intensity_weighted_mean_spectral_analysis\")\n\tcasalog.post(\"starting\")\n\n\t# Removing any file output from previous runs, so immath will proceed\n\tos.system('rm -rf imgG192_6s_spw0-63_mfs2.image.tt1.filtered')\n\timmath(imagename=['imgG192_6s_spw0-63_mfs2.image.tt1',\n\t 'imgG192_6s_spw0-63_mfs2.image.tt0'],\n\t mode='evalexpr',\n\t expr='IM0[IM1>2E-4]',\n\t outfile='imgG192_6s_spw0-63_mfs2.image.tt1.filtered')\n\t#\n\t# Removing any file output from previous runs, so immath will proceed\n\tos.system('rm -rf imgG192_6s_spw0-63_mfs2.image.tt0.filtered')\n\timmath(imagename=['imgG192_6s_spw0-63_mfs2.image.tt0'],\n\t mode='evalexpr',\n\t expr='IM0[IM0>2E-4]',\n\t outfile='imgG192_6s_spw0-63_mfs2.image.tt0.filtered')",
"def testSummary(self, clf):\n summary1 = clf.summary()\n self.failUnless('not yet trained' in summary1)\n clf.train(datasets['uni2small'])\n summary = clf.summary()\n # It should get bigger ;)\n self.failUnless(len(summary) > len(summary1))\n self.failUnless(not 'not yet trained' in summary)",
"def test_gaussian_profile(): \n\n # check sigma input\n obj = galsim.Gaussian(sigma=sigma)\n image_galsim_sigma = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_sigma = gf.lightprofiles.gaussian(sigma=[sigma], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check batch input\n obj1 = galsim.Gaussian(sigma=sigma)\n obj2 = galsim.Gaussian(sigma=sigma*2)\n image_galsim_batch1 = obj1.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galsim_batch2 = obj2.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galsim_batch = np.stack([image_galsim_batch1, image_galsim_batch2], axis=0)\n image_galflow_batch = gf.lightprofiles.gaussian(sigma=[sigma, sigma*2], nx=stamp_size, ny=stamp_size)\n\n # check half_light_radius input\n obj = galsim.Gaussian(half_light_radius=hlr)\n image_galsim_hlr = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_hlr = gf.lightprofiles.gaussian(half_light_radius=[hlr], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check fwhm input\n obj = galsim.Gaussian(fwhm=fwhm)\n image_galsim_fwhm = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_fwhm = gf.lightprofiles.gaussian(fwhm=[fwhm], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check fwhm input\n obj = galsim.Gaussian(fwhm=fwhm)\n image_galsim_scale = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=scale, method='no_pixel').array\n image_galflow_scale = gf.lightprofiles.gaussian(fwhm=[fwhm], nx=stamp_size, ny=stamp_size, scale=scale)[0,...]\n\n # check flux input\n obj = galsim.Gaussian(fwhm=fwhm, flux=flux)\n image_galsim_flux = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_flux = gf.lightprofiles.gaussian(fwhm=[fwhm], flux=[flux], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check even and odd stamp sizes\n obj = galsim.Gaussian(fwhm=fwhm, flux=flux)\n image_galsim_size = obj.drawImage(nx=stamp_size, ny=stamp_size+1, scale=1., method='no_pixel').array\n image_galflow_size = gf.lightprofiles.gaussian(fwhm=[fwhm], flux=[flux], nx=stamp_size, ny=stamp_size+1)[0,...]\n\n assert_allclose(image_galsim_sigma, image_galflow_sigma, atol=1e-5)\n assert_allclose(image_galsim_batch, image_galflow_batch, atol=1e-5)\n assert_allclose(image_galsim_hlr, image_galflow_hlr, atol=1e-5)\n assert_allclose(image_galsim_fwhm, image_galflow_fwhm, atol=1e-5)\n assert_allclose(image_galsim_scale, image_galflow_scale, rtol=1e-5)\n assert_allclose(image_galsim_flux, image_galflow_flux, atol=1e-5)\n assert_allclose(image_galsim_size, image_galflow_size, atol=1e-5)",
"def test_trial_ensemble(trial_name, classifier):\n models_dir = args.saved_models + '/{0}/best_models/'.format(trial_name)\n best_models = [m[2] for m in os.walk(models_dir)][0]\n classifiers = []\n for m in best_models:\n new_classifier = classifier\n new_classifier.load_checkpoint(models_dir+m)\n classifiers.append(new_classifier)\n \n total_correct = 0\n for i, x in enumerate(classifier.test_di):\n label = x[4] if classifier.classification_type == \"simple\" else x[5]\n predictions = [c.classify(x) for c in classifiers]\n avg_prediction = np.mean(predictions, 0)\n class_prediction = avg_prediction.argmax(0)\n if class_prediction == label:\n total_correct += 1\n \n return total_correct / len(classifier.test_di)",
"def test_AggregateProfiles_init():\n assert ap.sql_file == file\n assert ap.strata == [\"Metadata_Plate\", \"Metadata_Well\"]\n assert ap.merge_cols == [\"TableNumber\", \"ImageNumber\"]\n assert ap.features == \"infer\"\n pd.testing.assert_frame_equal(image_df, ap.image_df)\n assert ap.subsample_frac == 1\n assert ap_subsample.subsample_frac == 1\n assert ap.subsample_n == \"all\"\n assert ap_subsample.subsample_n == 2\n assert ap.subset_data_df == \"none\"\n assert ap.output_file == \"none\"\n assert ap.operation == \"median\"\n assert not ap.is_aggregated\n assert ap.subsampling_random_state == \"none\"\n assert ap_subsample.subsampling_random_state == 123",
"def mean_over_files(fnames, key):\n num_files = check_file_list(fnames)\n mean = 0\n for fname in fnames:\n with h5.File(fname, 'r') as ifi:\n try:\n mean += ifi[key][:].mean()\n except ValueError:\n raise\n return mean / num_files",
"def get_mean_face(images):\n count = len(images)\n ave = np.zeros(images[0].shape)\n for arr in images:\n ave = ave + arr/count\n if DEBUG:\n plt.matshow(ave)\n plt.show()\n return ave",
"def test_cpu_invariance(self) -> AggregatedResultBundle:\n res_list = []\n for cpu, item_group in groupby(self.invariance_items, key=lambda x: x.cpu):\n item_group = list(item_group)\n # combine all frequencies of that CPU class, although they should\n # all be the same\n max_freq = max(itertools.chain.from_iterable(\n x.freq_list for x in item_group\n ))\n max_freq_items = [\n item\n for item in item_group\n if item.freq == max_freq\n ]\n for item in max_freq_items:\n # Only test util, as it should be more robust\n res = item.test_task_util_avg()\n res_list.append(res)\n\n return AggregatedResultBundle(res_list, 'cpu')",
"def test_data_loader_multiprocessing(self):\n self.assertEqual(\n sum(\n (\n batch.positives.shape[0]\n for batch in torch.utils.data.DataLoader(dataset=self.instance, batch_size=None, num_workers=2)\n )\n ),\n self.factory.num_triples,\n )",
"def test_fake(self):\n source_model = SkyModel(spectral_model=PowerLawSpectralModel())\n dataset = SpectrumDatasetOnOff(\n name=\"test\",\n counts=self.on_counts,\n counts_off=self.off_counts,\n models=source_model,\n exposure=self.aeff * self.livetime,\n edisp=self.edisp,\n acceptance=RegionNDMap.from_geom(geom=self.on_counts.geom, data=1),\n acceptance_off=RegionNDMap.from_geom(geom=self.off_counts.geom, data=10),\n )\n real_dataset = dataset.copy()\n\n background = RegionNDMap.from_geom(dataset.counts.geom)\n background.data += 1\n dataset.fake(npred_background=background, random_state=314)\n\n assert real_dataset.counts.data.shape == dataset.counts.data.shape\n assert real_dataset.counts_off.data.shape == dataset.counts_off.data.shape\n assert dataset.counts_off.data.sum() == 39\n assert dataset.counts.data.sum() == 5",
"def test_total_images(test_data_path, nb_images, classifier,\n\t\t\t\t\t minibatch_size = 25, decision_rule = 'majority_vote',\n\t\t\t\t\t only_green = False):\n\tvalid_decision_rule = ['majority_vote', 'weighted_vote']\n\tif decision_rule not in valid_decision_rule:\n\t\traise NameError(decision_rule + ' is not a valid decision rule.')\n\n\tprint('\t Testing for the database : ' + test_data_path)\n\n\tdata_test = il.Test_loader(test_data_path, subimage_size = 100, only_green = only_green)\n\n\n\tpool = Pool()\n\ttp = 0\n\tfp = 0\n\tnb_CGG = 0\n\taccuracy = 0\n\tfor i in range(nb_images):\n\t\tbatch, label, width, height, original, image_file = data_test.get_next_image()\n\t\tbatch_size = batch.shape[0]\n\t\tj = 0\n\t\tprediction = 0\n\t\tlabels = []\n\t\tdiff = []\n\t\tnb_im = 0\n\t\twhile j < batch_size:\n\n\t\t\tdat = []\n\t\t\tfor k in range(j, min(j+minibatch_size, batch_size)): \n\t\t\t\tdat.append([batch[k], label])\n\n\t\t\tto_compute = [i for i in range(minibatch_size)]\n\t\t\tresult = pool.starmap(partial(compute_features, \n\t\t\t\t\t\t\t\t\t\tbatch_size = 1, \n\t\t\t\t\t\t\t\t\t\tnb_batch = minibatch_size, \n\t\t\t\t\t\t\t\t\t\tmode = 'lbp'),\n\t\t\t\t\t\t\t\t\t\tzip(dat, to_compute)) \n\t\t\tres = []\n\t\t\tfor k in range(len(result)):\n\t\t\t\tres.append(result[k][0][0])\n\t\t\tres = normalize(np.array(res), axis = 1)\n\t\t\tpred = np.log(classifier.predict_proba(res) + 0.00000001)\n\t\t\t# print(classifier.predict(np.array(res)))\n\t\t\t\t\t\n\t\t\tnb_im += pred.shape[0]\n\t\t\tlabel_image = np.argmax(pred, 1)\n\t\t\td =\tnp.max(pred, 1) - np.min(pred, 1)\n\t\t\tfor k in range(d.shape[0]):\n\t\t\t\tdiff.append(np.round(d[k], 1))\n\n\t\t\tif decision_rule == 'majority_vote':\n\t\t\t\tprediction += np.sum(label_image)\n\t\t\tif decision_rule == 'weighted_vote':\n\t\t\t\tprediction += np.sum(-2*d*(label_image - 0.5))\n\n\t\t\tfor l in label_image:\n\t\t\t\tlabels.append(data_test.image_class[l])\n\t\t\tj+=minibatch_size\n\n\t\t\t\t \n\t\tdiff = np.array(diff)\n\t\tif decision_rule == 'majority_vote':\n\t\t\tprediction = data_test.image_class[int(np.round(prediction/batch_size))]\n\t\tif decision_rule == 'weighted_vote':\n\t\t\tprediction = data_test.image_class[int(max(prediction,0)/abs(prediction))]\n\t\t\t\t\n\n\t\tif label == 'CGG':\n\t\t\tnb_CGG += 1\n\t\tif(label == prediction):\n\t\t\taccuracy+= 1\n\t\t\tif(prediction == 'CGG'):\n\t\t\t\ttp += 1\n\t\telse:\n\t\t\tif prediction == 'CGG':\n\t\t\t\tfp += 1\n\t\tprint(prediction, label)\n\n\t\tif ((i+1)%10 == 0):\n\t\t\tprint('\\n_______________________________________________________')\n\t\t\tprint(str(i+1) + '/' + str(nb_images) + ' images treated.')\n\t\t\tprint('Accuracy : ' + str(round(100*accuracy/(i+1), 2)) + '%')\n\t\t\tif tp + fp != 0:\n\t\t\t\tprint('Precision : ' + str(round(100*tp/(tp + fp), 2)) + '%')\n\t\t\tif nb_CGG != 0:\n\t\t\t\t\tprint('Recall : ' + str(round(100*tp/nb_CGG,2)) + '%')\n\t\t\tprint('_______________________________________________________\\n')\n\n\n\tprint('\\n_______________________________________________________')\n\tprint('Final Accuracy : ' + str(round(100*accuracy/(nb_images), 3)) + '%')\n\tprint('Final Precision : ' + str(round(100*tp/(tp + fp), 3)) + '%')\n\tprint('Final Recall : ' + str(round(100*tp/nb_CGG, 3)) + '%')\n\tprint('_______________________________________________________\\n')",
"def getaverage(dir):\n print(dir)\n for (dirpath,dirname,filenames) in walk(dir):\n count = 0\n for f in filenames:\n if f.endswith(\".tif\"):\n count = count+1\n with open(dir+'/'+f,'rb') as fptr:\n im = Image.open(fptr)\n imarray = np.array(im)\n print(\"mean/max/min: \",np.mean(imarray),np.max(imarray),np.min(imarray))\n if count==1:\n farray = imarray.astype('float32')\n new = False\n else:\n farray = farray+imarray\n del imarray\n farray = farray/count\n print(\"ave mean/max/min: \",np.mean(farray),np.max(farray),np.min(farray))\n return farray",
"def generateTestData():\n all_albums = Album.objects.all()\n # for each album, generate a random number of votes\n for album in all_albums:\n n = 80\n votes = random.randint(0, n)\n album.votes = votes\n album.contests = n\n try:\n album.rating = 100 * (album.votes / album.contests) # % rating\n except ZeroDivisionError:\n album.rating = 0\n album.save()\n print('Generated test data')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Strips invalid characters from a filename and ensures that the encoded filename is no more than `max_bytes` bytes long.
|
def strip_invalid_filename_characters(filename: str, max_bytes: int = 200) -> str:
filename = "".join([char for char in filename if char.isalnum() or char in "._- "])
filename_len = len(filename.encode())
if filename_len > max_bytes:
while filename_len > max_bytes:
if len(filename) == 0:
break
filename = filename[:-1]
filename_len = len(filename.encode())
return filename
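A quick usage sketch; the input strings are arbitrary.

print(strip_invalid_filename_characters("my:report*2024?.txt"))    # "myreport2024.txt"
print(strip_invalid_filename_characters("x" * 300, max_bytes=10))  # ten "x" characters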
|
[
"def make_filename_safe(filename):\n allowed_length = 255 # windows doesn't support more than 255 character filenames\n allowed_chars = string.ascii_letters + string.digits + \"~ -_.()\"\n safe_filename = ''.join(c for c in filename if c in allowed_chars)\n return safe_filename[:allowed_length]",
"def sanitize_filename(filename):\n filename = filename.replace(\"..\", \".\")\n for character in filename:\n if character not in (\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" +\n \"abcdefghijklmnopqrstuvwxyz\" + \n \"0123456789\" + \n \"._-\"):\n filename = filename.replace(character, \"\")\n return filename",
"def clean_filename(filename, replace='', acceptable_chars=('.', '_')):\n return \"\".join([c if c.isalpha() or c.isdigit() or c in acceptable_chars else replace for c in filename])",
"def clean_filename(filename):\n # Drop file extension\n # Do not use os.path.splitext because filename not required to have an extension\n for extension in [\".zip\", \".tgz\", \".par\", \".PAR\"]:\n if filename.endswith(extension):\n filename = filename.replace(extension, \"\")\n\n # Drop imaging extensions\n # Do not use os.path.splitext because filename may include periods\n for extension in [\".dicom\", \".dcm\", \".parrec\"]:\n if filename.endswith(extension):\n filename = filename.replace(extension, \"\")\n\n # Replace non-alphanumerics with underscore and avoiding repeat underscores\n filename = re.sub(\"[^0-9a-zA-Z]+\", \"_\", filename)\n\n # Do not end on an underscore\n filename = filename.rstrip(\"_\")\n\n # if no alphanumerics are present in the filename, set the filename to inputs\n if len(filename) == 0:\n filename = \"inputs\"\n\n return filename",
"def is_valid_file_name_length(file_name, length):\n return len(file_name) <= int(length)",
"def validate(cls, filename):\n \n filename = (\n re.sub(cls._INVALID_CHARS_PATTERN, \"\", filename)\n .strip(\" \")\n .rstrip(\".\")\n )\n \n root, ext = os.path.splitext(filename)\n # For reserved names, the comparison must be case-insensitive\n # (because Windows has case-insensitive filenames).\n if root.upper() in cls._INVALID_NAMES:\n filename = root + \" (1)\" + ext\n \n if not filename:\n filename = _(\"Untitled\")\n \n return filename",
"def validate_suffix_length(output_file_suffix, suffix_length):\n\t\tif len(output_file_suffix) > suffix_length:\n\t\t\t\tprint(\"split: reached maximum possible number of files with suffix length of %d\" % suffix_length)\n\t\t\t\texit(1)",
"def safe_filename(filename, replacement_char='_'):\n illegal_chars_pattern = re.compile(r\"[#%&{}\\\\<>\\*\\?/ \\$!'\\\":@\\+`|=]\")\n return re.sub(illegal_chars_pattern, replacement_char, filename)",
"def canonical_filename(filename):\n if type(filename) is not type(u''):\n filename = str(filename).decode('utf-8')\n filename = unicodedata.normalize('NFC', filename)\n filename = filename.strip()\n filename = _replace_re.sub(u'_', filename)\n if _prefix_re.match(unicodedata.normalize('NFKC', filename)):\n filename = '_' + filename\n if len(filename) > 255:\n filename = filename[:127] + u'\\N{horizontal ellipsis}' + filename[-127:]\n return filename",
"def _validate_file_name(cplex, filename, description):\n if filename is None:\n return filename\n matches = _validate_file_name.illegal_characters.search(filename)\n if matches:\n raise ValueError(\n \"Unallowed character (%s) found in CPLEX %s file path/name.\\n\\t\"\n \"For portability reasons, only [%s] are allowed.\"\n % (matches.group(), description,\n _validate_file_name.allowed_characters.replace(\"\\\\\",'')))\n # CPLEX only supports quoting spaces starting in v12.8.\n if ' ' in filename:\n if cplex.version()[:2] >= (12,8):\n filename = '\"'+filename+'\"'\n else:\n raise ValueError(\n \"Space detected in CPLEX %s file path/name\\n\\t%s\\nand \"\n \"CPLEX older than version 12.8. Please either upgrade \"\n \"CPLEX or remove the space from the %s path.\"\n % (description, filename, description))\n return filename",
"def createFilename(self, name):\n validFilenameChars = \"-_.()%s%s\" % (string.ascii_letters, string.digits)\n cleanedFilename = unicodedata.normalize('NFKD', name)\n return ''.join(c for c in cleanedFilename if c in validFilenameChars)",
"def makeValidFilename(value, normalize_unicode = False, windows_safe = False, custom_blacklist = None, replace_with = \"_\"):\n\n if windows_safe:\n # Allow user to make Windows-safe filenames, if they so choose\n sysname = \"Windows\"\n else:\n sysname = platform.system()\n\n # If the filename starts with a . prepend it with an underscore, so it\n # doesn't become hidden.\n\n # This is done before calling splitext to handle filename of \".\", as\n # splitext acts differently in python 2.5 and 2.6 - 2.5 returns ('', '.')\n # and 2.6 returns ('.', ''), so rather than special case '.', this\n # special-cases all files starting with \".\" equally (since dotfiles have\n # no extension)\n if value.startswith(\".\"):\n value = \"_\" + value\n\n # Treat extension seperatly\n value, extension = split_extension(value)\n\n # Remove any null bytes\n value = value.replace(\"\\0\", \"\")\n\n # Blacklist of characters\n if sysname == 'Darwin':\n # : is technically allowed, but Finder will treat it as / and will\n # generally cause weird behaviour, so treat it as invalid.\n blacklist = r\"/:\"\n elif sysname in ['Linux', 'FreeBSD']:\n blacklist = r\"/\"\n else:\n # platform.system docs say it could also return \"Windows\" or \"Java\".\n # Failsafe and use Windows sanitisation for Java, as it could be any\n # operating system.\n blacklist = r\"\\/:*?\\\"<>|\"\n\n # Append custom blacklisted characters\n if custom_blacklist is not None:\n blacklist += custom_blacklist\n\n # Replace every blacklisted character with a underscore\n value = re.sub(\"[%s]\" % re.escape(blacklist), replace_with, value)\n\n # Remove any trailing whitespace\n value = value.strip()\n\n # There are a bunch of filenames that are not allowed on Windows.\n # As with character blacklist, treat non Darwin/Linux platforms as Windows\n if sysname not in ['Darwin', 'Linux']:\n invalid_filenames = [\"CON\", \"PRN\", \"AUX\", \"NUL\", \"COM1\", \"COM2\",\n \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\", \"LPT1\",\n \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\"]\n if value in invalid_filenames:\n value = \"_\" + value\n\n # Replace accented characters with ASCII equivalent\n if normalize_unicode:\n import unicodedata\n value = unicode(value) # cast data to unicode\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n\n # Truncate filenames to valid/sane length.\n # NTFS is limited to 255 characters, HFS+ and EXT3 don't seem to have\n # limits, FAT32 is 254. I doubt anyone will take issue with losing that\n # one possible character, and files over 254 are pointlessly unweidly\n max_len = 254\n\n if len(value + extension) > max_len:\n if len(extension) > len(value):\n # Truncate extension instead of filename, no extension should be\n # this long..\n new_length = max_len - len(value)\n extension = extension[:new_length]\n else:\n # File name is longer than extension, truncate filename.\n new_length = max_len - len(extension)\n value = value[:new_length]\n\n return value + extension",
"def _get_file_too_large_error_message(filename):\n\n return _(\n 'File {filename} exceeds maximum size of '\n '{maximum_size_in_megabytes} MB.'\n ).format(\n filename=filename,\n maximum_size_in_megabytes=settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB,\n )",
"def send_filename(self, fn):\n\t\tif len(fn) > 255:\n\t\t\traise FilenameTooLongError('%s contains more than 255 characters' % fn)\n\t\twith BytesIO(bytes(fn.ljust(255, '\\x00'), 'utf-8')) as f:\n\t\t\tself.send_data(f, 255)",
"def check_resource_max_length(name: str) -> None:\n if len(name) > RESOURCE_MAX_LENGTH:\n raise OpenshiftTektonResourcesNameTooLongError(\n f\"Resource name {name} is longer than {RESOURCE_MAX_LENGTH} characters\"\n )",
"def filename(self):\n fname = self.raw_filename\n if not isinstance(fname, text_type):\n fname = fname.decode('utf8', 'ignore')\n fname = normalize('NFKD', fname)\n fname = fname.encode('ASCII', 'ignore').decode('ASCII')\n fname = os.path.basename(fname.replace('\\\\', os.path.sep))\n fname = re.sub(r'[^a-zA-Z0-9-_.\\s]', '', fname).strip()\n fname = re.sub(r'[-\\s]+', '-', fname).strip('.-')\n return fname[:255] or 'empty'",
"def wipe_bad_chars(filename):\n return multi_replace(filename, {\"(\": \"\", \" \": \"_\", \")\": \"\", \"/\": \"_\"}, True)",
"def test_reject_name_if_too_long(self):\n # Assume\n\n name = \"Thisnameexceedstwentycharacters\"\n\n # Action\n\n result = birthdays.name_is_valid(name)\n\n # Assert\n\n self.assertFalse(result)",
"def get_max_file(files):\n max_length = 0\n max_file = None\n for f in files:\n if f['length'] > max_length:\n max_length = f['length']\n max_file = f\n return max_file"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs async functions in sync scopes. Can be used in any scope.
|
def synchronize_async(func: Callable, *args, **kwargs) -> Any:
return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs) # type: ignore
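A minimal usage sketch, assuming `fsspec` is installed and `synchronize_async` is importable; the coroutine is a trivial stand-in for real async work.

import asyncio

async def fetch(value):
    await asyncio.sleep(0)    # stand-in for real async I/O
    return value * 2

result = synchronize_async(fetch, 21)
print(result)                 # 42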
|
[
"def async_test(func):\r\n\r\n def wrapper(*args, **kwargs):\r\n future = func(*args, **kwargs)\r\n asyncio.run(future)\r\n return wrapper",
"async def run_sync(func: Callable[..., T], *args: Any) -> T:\n loop = asyncio.get_event_loop()\n return await loop.run_in_executor(None, func, *args)",
"def async_func(self, *args, **kwargs):\n del args\n task = TaskRunner(run_function=func, obj=self, kwargs=kwargs)\n ret = task.start()\n return ret",
"def run_the_sync_code():\n start_time = datetime.now()\n sync_db_example(1)\n sync_db_example(3)\n print(f\"Finished retrieving all items, finish Time: {datetime.now() - start_time}, {datetime.now()}\")",
"def thread_async(self):\n while True:\n callback, command, kwargs = self.async_queries.get()\n resp = self.query(command, **kwargs)\n callback(resp)",
"def synchronous(lock=threading.RLock()):\n def synced(func):\n @wraps(func)\n def synchronizer(self, *args, **kwargs):\n with lock:\n return func(self, *args, **kwargs)\n return synchronizer\n return synced",
"def syncify(*types):\n # Our asynchronous generators all are `RequestIter`, which already\n # provide a synchronous iterator variant, so we don't need to worry\n # about asyncgenfunction's here.\n for t in types:\n for name in dir(t):\n if not name.startswith('_') or name == '__call__':\n if inspect.iscoroutinefunction(getattr(t, name)):\n _syncify_wrap(t, name)",
"def async_test(func):\n \n @wraps(func)\n def inner(self: TestCase, *args, **kwargs):\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(func(self, *args, **kwargs))\n return inner",
"def _testDefaultSync(self):\n return self.PerformSync()",
"def call_async(func):\n\n @wraps(func)\n def wrapper(self, *args, **kw):\n \"\"\"Wraps instance method to be called on loop thread\"\"\"\n\n def call():\n \"\"\"Calls function on loop thread\"\"\"\n try:\n func(self, *args, **kw)\n except Exception:\n logger.exception(\n \"failed to call async [%r] with [%r] [%r]\", func, args, kw\n )\n\n self.loop.call_soon_threadsafe(call)\n\n return wrapper",
"async def bounded(f, *args, **kwargs):\n async with semaphores[f]:\n return await f(*args, **kwargs)",
"async def _run_services(objects: List[BaseTask]):\n return await asyncio.gather(*[obj.async_run() for obj in objects])",
"def apply_sync(self, func, *args, **kwargs):\n apply_key = int(self.apply_counter)\n self.apply_counter += 1\n futures = []\n with self.comm_executor(self.global_comm, root=0) as executor:\n for rank in range(1, self.global_size):\n futures.append(executor.submit(mpi_futures_apply_wrapper, func, apply_key, args, kwargs))\n results = [future.result() for future in futures]\n return results",
"def run_cmd_list_async(cmd_list):\n loop = None\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n # Create event loop when one is not available\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n cmds = []\n # Create a list of partial functions to run\n for cmd in cmd_list:\n cmds.append(run_cmd_async(cmd))\n\n # Wait until all are complete\n loop.run_until_complete(\n asyncio.gather(\n *cmds\n )\n )",
"def test_runnable_sync(self):\n run = RunAndExit()\n run.start_and_wait_completed(sync=True)",
"def async_context():\n global __ref_cnt\n global __all_task_done\n __ref_cnt += 1\n logger.debug(\"ref_cnt: %d\", __ref_cnt)\n yield\n __ref_cnt -= 1\n logger.debug(\"ref_cnt: %d\", __ref_cnt)\n __try_all_task_done()",
"def sync_execute_queries(queries, platforms=list(Platforms), proxy_list=[]):\n return asyncio.run(execute_queries(queries, platforms, proxy_list))",
"def _maybe_sync(self):\n client = self._client_reference()\n if client is None:\n raise RuntimeError('The slasher\\'s client was already garbage collected.')\n \n for sync_hook in RUNTIME_SYNC_HOOKS:\n if not sync_hook(client):\n return\n \n Task(KOKORO, self._do_main_sync(client))",
"def run(self, func, *args, **kwargs):\n if self.obj.sync_type.enabled:\n func(*args, **kwargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert the JSON schema into a Python type hint.
|
def json_schema_to_python_type(schema: Any) -> str:
type_ = get_type(schema)
if type_ == {}:
if "json" in schema["description"]:
return "Dict[Any, Any]"
else:
return "Any"
elif type_ == "null":
return "None"
elif type_ == "integer":
return "int"
elif type_ == "string":
return "str"
elif type_ == "boolean":
return "bool"
elif type_ == "number":
return "int | float"
elif type_ == "array":
items = schema.get("items")
if "prefixItems" in items:
elements = ", ".join(
[json_schema_to_python_type(i) for i in items["prefixItems"]]
)
return f"Tuple[{elements}]"
else:
elements = json_schema_to_python_type(items)
return f"List[{elements}]"
elif type_ == "object":
des = ", ".join(
[
f"{n}: {json_schema_to_python_type(v)} ({v.get('description')})"
for n, v in schema["properties"].items()
]
)
return f"Dict({des})"
elif type_ in ["oneOf", "anyOf"]:
desc = " | ".join([json_schema_to_python_type(i) for i in schema[type_]])
return desc
else:
raise APIInfoParseError(f"Cannot parse schema {schema}")
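A small usage sketch, assuming the undefined `get_type` helper simply reads the schema's "type" key and that `APIInfoParseError` is defined elsewhere; the sample schema is arbitrary.

schema = {
    "type": "array",
    "items": {"type": "string"},
}
print(json_schema_to_python_type(schema))   # "List[str]"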
|
[
"def _simple_to_json_schema(_schema):\n if _schema is str:\n return {'type': 'string'}\n elif _schema is int:\n return {'type': 'integer'}\n elif _schema is float:\n return {'type': 'number'}\n elif _schema is bool:\n return {'type': 'boolean'}\n elif _schema is ObjectId:\n return {'type': 'string', 'format': Format.OBJECTID}\n elif _schema is datetime:\n return {'type': 'string', 'format': Format.DATETIME}",
"def json_schema_type(in_type: str) -> str:\n return \"string\" if in_type == \"long_string\" else in_type",
"def loads(schema):\n return JsonSchema(schema)",
"def __gen_type_properties(json_schema, path, resolver, config, es_mapping):\n has_scope = 'id' in json_schema\n # update the current scope if the schema has an id\n if has_scope:\n resolver.push_scope(json_schema.get('id'))\n\n # resolve reference if there are any\n while '$ref' in json_schema:\n path = json_schema.get('$ref')\n json_schema = resolver.resolve(path)[1]\n\n if 'patternProperties' in json_schema:\n raise JsonSchemaSupportError('Schemas with patternProperties ' +\n 'are not supported.', path)\n if 'additionalProperties' in json_schema:\n raise JsonSchemaSupportError('Schemas with ' +\n 'additionalProperties are not ' +\n 'supported.', path)\n\n if es_mapping is None:\n es_mapping = {}\n\n # if the schema is in fact a collection of schemas, merge them\n json_schema_keys = set(json_schema.keys())\n collection_intersect = json_schema_keys.intersection(__collection_keys)\n if collection_intersect:\n # we suppose the schema is valid and only one of the collection keys\n # is present\n collection_key = collection_intersect.pop()\n # visit each schema and use it to extend current elasticsearch\n # mapping\n path += '/' + collection_key\n index = 0\n for sub_schema in json_schema.get(collection_key):\n __gen_type_properties(sub_schema, path + '[' + str(index) + ']',\n resolver, config, es_mapping)\n index += 1\n return es_mapping\n\n # get json schema type\n json_type = json_schema.get('type')\n\n if not json_type:\n if 'properties' in json_schema:\n json_type = 'object'\n else:\n # FIXME: handle enums with no type\n raise JsonSchemaSupportError('Schema has no \"type\" field', path)\n\n if isinstance(json_type, list):\n raise JsonSchemaSupportError('Schema with array of types are ' +\n 'not supported', path)\n\n if json_type == 'array':\n items = json_schema.get('items')\n # array items type is mandatory\n if not items:\n raise JsonSchemaSupportError('Cannot have schema with ' +\n '\"array\" type without ' +\n 'specifying the items type',\n path)\n # visit each item schema and use it to extend current elasticsearch\n # mapping\n path += '/items'\n if isinstance(items, list):\n index = 0\n for item in items:\n __gen_type_properties(item, path + '[' + str(index) + ']',\n resolver, config, es_mapping)\n index += 1\n return es_mapping\n else:\n # visit items' schema and use it to extend current elasticsearch\n # mapping\n return __gen_type_properties(items, path, resolver, config,\n es_mapping)\n\n # find the corresponding elasticsearch type\n if json_type == 'object':\n es_type = 'object'\n else:\n es_type, es_type_props = config.get_es_type(json_type,\n json_schema.get('format'))\n\n # if current elasticsearch mapping's type is already known, the new one and\n # the old should match\n if 'type' in es_mapping or 'properties' in es_mapping:\n # 'properties' is set either when the elasticsearch type is 'object' or\n # for root types\n old_es_type = ('object' if 'properties' in es_mapping\n else es_mapping['type'])\n if old_es_type != es_type:\n # elasticsearch root type mapping has no \"type\" property\n if 'properties' in es_mapping and 'type' not in es_mapping:\n raise JsonSchemaSupportError('Root schema type can ' +\n 'only be \"object\".', path)\n else:\n raise JsonSchemaSupportError('Redefinition of field ' +\n 'with another type is not ' +\n 'supported.', path)\n\n # add the type to the elasticsearch mapping if it is not a root type\n if 'properties' not in es_mapping:\n es_mapping['type'] = es_type\n\n if es_type == 'object':\n es_properties = es_mapping.get('properties')\n if not 
es_properties:\n es_properties = {}\n es_mapping['properties'] = es_properties\n # build the elasticsearch mapping corresponding to each json schema\n # property\n for prop, prop_schema in json_schema['properties'].iteritems():\n es_properties[prop] = __gen_type_properties(prop_schema,\n path + '/' + prop,\n resolver, config,\n es_properties.get(prop))\n # visit the dependencies defining additional properties\n if 'dependencies' in json_schema:\n deps_path = path + '/dependencies'\n for prop, deps in json_schema['dependencies'].iteritems():\n # if this is a \"schema dependency\", extend our current es\n # mapping with it\n if isinstance(deps, dict):\n __gen_type_properties(deps, deps_path + '[' + prop + ']',\n resolver, config, es_mapping)\n else:\n es_mapping['type'] = es_type\n if es_type_props:\n for type_prop, type_prop_value in es_type_props.iteritems():\n es_mapping[type_prop] = type_prop_value\n\n # pop the current jsonschema context\n if has_scope:\n resolver.pop_scope()\n\n return es_mapping",
"def _convert_type(self, parsed_data, schema, options):\n # check for list types here\n payload = parsed_data\n for key, value in schema.iteritems():\n key = str(key)\n # if the schema value is declared as string\n if value == 'string':\n payload[key] = str(payload[key])\n\n # if the schema value is declared as integer\n elif value == 'integer':\n try:\n payload[key] = int(payload[key])\n except ValueError:\n logger.error('Invalid schema - %s is not an int', key)\n return False\n\n elif value == 'float':\n try:\n payload[key] = float(payload[key])\n except ValueError:\n logger.error('Invalid schema - %s is not a float', key)\n return False\n\n elif value == 'boolean':\n payload[key] = str(payload[key]).lower() == 'true'\n\n elif isinstance(value, list):\n pass\n\n elif isinstance(value, (OrderedDict)):\n # allow for any value to exist in the map\n if len(value) == 0:\n pass\n else:\n # handle nested csv\n if isinstance(payload[key], str):\n options['hints'] = options['hints'][key]\n parse_csv = get_parser('csv')\n parsed_nested_key = parse_csv(payload[key],\n schema[key],\n options).parse()\n # Call the first element since a list is returned\n payload[key] = parsed_nested_key[0]\n\n self._convert_type(payload[key], schema[key], options)\n else:\n logger.error('Invalid declared type - %s', value)\n\n return payload",
"def test_marshmallow_schema():\n\n class UserSchema(Schema):\n name = fields.Int()\n\n schema_type = hug.types.MarshmallowInputSchema(UserSchema())\n assert schema_type({\"name\": 23}, {}) == {\"name\": 23}\n assert schema_type(\"\"\"{\"name\": 23}\"\"\", {}) == {\"name\": 23}\n assert schema_type.__doc__ == \"UserSchema\"\n with pytest.raises(InvalidTypeData):\n schema_type({\"name\": \"test\"}, {})\n\n schema_type = hug.types.MarshmallowReturnSchema(UserSchema())\n assert schema_type({\"name\": 23}) == {\"name\": 23}\n assert schema_type.__doc__ == \"UserSchema\"\n with pytest.raises(InvalidTypeData):\n schema_type({\"name\": \"test\"})",
"def resolve_arg_to_schema(arg: Union[Type, Schema]) -> Schema:\n if inspect.isclass(arg):\n if issubclass(arg, DOMElement):\n return arg.__json_schema__()\n if issubclass(arg, Enum):\n return SchemaEnum(arg)\n else:\n return SchemaPrimitive(arg)\n elif isinstance(arg, Schema):\n return arg\n else:\n raise TypeError(f\"Unexpected object type: {type(arg)}\")",
"def generate_type_mapping(json_schema, base_uri, context_schemas, config):\n resolver = jsonschema.RefResolver(referrer=json_schema,\n store=context_schemas,\n base_uri=base_uri)\n return __gen_type_properties(json_schema, base_uri, resolver, config, {\n '_all': {'enable': config.all_field},\n 'numeric_detection': config.numeric_detection,\n 'date_detection': config.date_detection,\n # empty type mapping\n 'properties': {},\n })",
"def get_schema(self):\r\n schema = {}\r\n schema[\"type\"] = self.type\r\n if self.type == \"string\":\r\n schema[\"blank\"] = True # allow blank strings\r\n if self.optional:\r\n schema[\"required\"] = False\r\n\r\n return schema",
"def generate_schema():\n\n _result = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"The JSON Schema for QAL transformations\",\n \"title\": \"QAL Transformation\",\n \"type\": \"object\",\n \"version\": __version__,\n \"properties\": {},\n \"namespace\": \"qal\",\n \"definitions\": {}\n }\n\n def _property_to_type(_property_name):\n if _property_name == \"uuid\":\n return [{\n \"type\": \"string\",\n \"pattern\": \"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$\"\n }]\n elif _property_name == \"mappings\":\n return [{\n \"type\": \"array\",\n \"items\": {\n \"$ref\": \"#/definitions/Mapping\"\n }\n }]\n elif _property_name == \"substitution\":\n return [{\"$ref\": \"#/definitions/Substitution\"}]\n\n elif _property_name == \"resources\":\n return [{\n \"type\": \"array\",\n \"items\": {\n \"$ref\": \"qal://resources.json#/definitions/Resource\"\n }\n }]\n elif _property_name == \"resources\":\n return [{\n \"type\": \"array\",\n \"items\": {\n \"$ref\": \"qal://resources.json#/definitions/Resource\"\n }\n }]\n elif _property_name in [\"builtin_substitutions\", \"key_fields\", \"destination_log_level\", \"key_fields\",\n \"source\", \"destination\"]:\n # Disregard these fields\n return None\n elif _property_name in [\"delete\", \"insert\", \"update\", \"is_key\"]:\n # Disregard these fields\n return [{\"type\": \"boolean\"}]\n else:\n return [{\"type\": \"string\"}]\n\n # First, Add parameter types\n for _curr_class in list_prefixed_classes(globals(), \"\", _exclude=[]):\n _result[\"definitions\"].update({_curr_class: {\n \"type\": \"object\",\n \"properties\": json_add_child_properties(globals(), _curr_class, _property_to_type)\n }\n })\n\n return _result",
"def parse_schema(self, schema_json):\n if sys.version_info.major >= 3:\n return avro.schema.Parse(schema_json)\n else:\n return avro.schema.parse(schema_json)",
"def _get_discriminated_type_from_schema(schema: Dict) -> str:\n generic_type = JsonSchemaTranslator._get_type_from_schema(schema)\n if generic_type == \"object\" and \"javaType\" in schema:\n return schema[\"javaType\"].split(\".\")[-1]\n if generic_type == \"object\" and \"title\" in schema and \"properties\" in schema:\n return schema[\"title\"]\n if \"format\" in schema:\n return f\"{generic_type}({schema['format']})\"\n\n return generic_type",
"def to_schema_field(field_class):\n class DynamicJSONField(field_class):\n def prepare_value(self, value):\n \"\"\"\n Use the raw field data in the JSON field.\n \"\"\"\n if value is None:\n return value\n return super().prepare_value(getattr(value, '_data', value))\n\n return DynamicJSONField",
"def _introspect_schema(self, raw_schema, raw_attributes=None):\n schema = []\n sane_attributes = {}\n\n if raw_attributes:\n for field in raw_attributes:\n sane_attributes[field['AttributeName']] = field['AttributeType']\n\n for field in raw_schema:\n data_type = sane_attributes.get(field['AttributeName'], STRING)\n\n if field['KeyType'] == 'HASH':\n schema.append(\n HashKey(field['AttributeName'], data_type=data_type)\n )\n elif field['KeyType'] == 'RANGE':\n schema.append(\n RangeKey(field['AttributeName'], data_type=data_type)\n )\n else:\n raise exceptions.UnknownSchemaFieldError(\n \"%s was seen, but is unknown. Please report this at \"\n \"https://github.com/boto/boto/issues.\" % field['KeyType']\n )\n\n return schema",
"def _extract_schema_json(\n schema_path: str\n) -> dict:\n with open(schema_path) as json_file:\n json_data = json.load(json_file)\n\n schema_dict = {x[\"name\"]: x[\"type\"] for x in json_data}\n\n schema_dict = _schema_sql_to_bq_compatibility(schema_dict)\n\n return schema_dict",
"def _jsonschema_type_mapping(self):\n oneOf = {\"oneOf\": [], \"description\": description, \"default\": default, \"title\": self.name}\n\n for idx, option in enumerate(field_options):\n mfield_meta = option.metadata[\"marshmallow_field\"]\n\n # Necessary for key/name de-duplication in case a name is not supplied by the user:\n mfield_meta_class_name = str(mfield_meta.__class__).split(\".\")[-1].split(\"'\")[0].lower()\n\n # If the option inherits from a custom dataclass-field, then use the custom jsonschema:\n if hasattr(mfield_meta, \"_jsonschema_type_mapping\"):\n oneOf[\"oneOf\"].append(mfield_meta._jsonschema_type_mapping())\n # Otherwise, extract the jsonschema using a dummy dataclass as intermediary:\n else:\n\n @m_dataclass\n class DummyClass:\n tmp: Any = option\n\n dummy_schema = unload_jsonschema_from_marshmallow_class(DummyClass)\n tmp_json_schema = dummy_schema[\"properties\"][\"tmp\"]\n # Manually set the title, otherwise it would be 'tmp':\n tmp_json_schema[\"title\"] = f\"{self.name}_{mfield_meta_class_name}_option\"\n oneOf[\"oneOf\"].append(tmp_json_schema)\n\n # Add null as an option if we want to allow none but none of the field options allow none.\n any_field_options_allow_none = any(\n option.metadata[\"marshmallow_field\"].allow_none for option in field_options\n )\n if allow_none and not any_field_options_allow_none:\n oneOf[\"oneOf\"] += [{\"type\": \"null\", \"title\": \"null_option\", \"description\": \"Disable this parameter.\"}]\n\n return oneOf",
"def python_type(self):",
"def test_json_issue():\n grammar = \"\"\"\n File:\n Array | Object\n ;\n\n Array:\n \"[\" values*=Value[','] \"]\"\n ;\n\n Value:\n PrimitiveValue | Object | Array | NullValue\n ;\n\n Object:\n \"{\" members*=Member[','] \"}\"\n ;\n\n Member:\n key=STRING ':' value=Value\n ;\n\n PrimitiveValue:\n val=STRING | val=FLOAT | val=BOOL\n ;\n\n NullValue:\n val=\"null\"\n ;\n \"\"\"\n json_mm = metamodel_from_str(grammar)\n assert json_mm['Object']._tx_type is RULE_COMMON\n assert json_mm['Member']._tx_type is RULE_COMMON\n assert json_mm['Array']._tx_type is RULE_COMMON\n assert json_mm['File']._tx_type is RULE_ABSTRACT\n assert json_mm['Value']._tx_type is RULE_ABSTRACT",
"def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLInputObjectType, graphql_type)\n\n if self._out_type:\n graphql_type.out_type = self._out_type # type: ignore\n\n if self._out_names:\n for graphql_name, python_name in self._out_names.items():\n if graphql_name not in graphql_type.fields:\n raise ValueError(\n f\"Field {graphql_name} is not defined on type {self.name}\"\n )\n graphql_type.fields[graphql_name].out_name = python_name"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse command line arguments into a dictionary. Numeric values are automatically converted to the appropriate types.
|
def parse_args(sys_argv: List[str]) -> Dict[str, Union[str, int, float, bool]]:
kwargs = {} # type: Dict[str, Union[str, int, float, bool]]
if len(sys_argv) > 1:
for arg in sys_argv[1:]:
k = arg.split("=")[0][2:]
v = arg.split("=")[1] # type: Union[str, int, float, bool]
if v == 'True':
v = True
elif v == 'False':
v = False
else:
try:
v = int(v)
except ValueError:
try:
v = float(v)
except ValueError:
pass
kwargs[k] = v
return kwargs
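A minimal usage sketch for the function above (the argv values here are hypothetical):

if __name__ == "__main__":
    # e.g. invoked as: python train.py --lr=0.01 --epochs=10 --debug=True
    sample_argv = ["train.py", "--lr=0.01", "--epochs=10", "--debug=True"]
    print(parse_args(sample_argv))  # {'lr': 0.01, 'epochs': 10, 'debug': True}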
|
[
"def _args_to_dict() -> Dict[str, str]:\r\n arguments = {}\r\n for argument in sys.argv[1:]:\r\n if '=' in argument:\r\n separated = argument.find('=')\r\n key, value = argument[:separated], argument[separated + 1:]\r\n arguments[key] = value\r\n return arguments",
"def easy_args(sysArgs, _dict):\n\n\n for farg in sysArgs:\n #Try to weed out some meaningless args.\n\n #print(farg)\n\n if \".py\" in farg:\n continue\n if \"=\" not in farg:\n continue\n\n #print('cycle')\n try:\n #if 1:\n\n #########\n #Split out the dict name, key name, and value\n #########\n\n (dicitem,val) = farg.split(\"=\") #Split on equals operator\n (dic,arg) = dicitem.split(\".\")\n if '*=' in farg:\n (dicitem,val) = farg.split(\"*=\") #If in-place multiplication, split on '*='\n (dic,arg) = dicitem.split(\".\")\n if '/=' in farg:\n (dicitem,val) = farg.split(\"/=\") #If in-place division, split on '/='\n (dic,arg) = dicitem.split(\".\")\n\n if '+=' in farg:\n (dicitem,val) = farg.split(\"+=\") #If in-place addition, split on '+='\n (dic,arg) = dicitem.split(\".\")\n if '-=' in farg:\n (dicitem,val) = farg.split(\"-=\") #If in-place addition, split on '-='\n (dic,arg) = dicitem.split(\".\")\n\n\n #print('check dic,arg', dic,arg)\n\n\n #########\n #Basic type conversion from string to float, boolean\n #########\n\n if val == 'True':\n #print('1')\n val = True\n elif val == 'False': #First check if args are boolean\n #print('2')\n val = False\n else:\n #print('3')\n try:\n val = float(val) #next try to convert to a float,\n except ValueError:\n pass #otherwise leave as string\n\n #print('test', val,val_)\n\n #########\n #Resolve the units (using scaling/Pint)\n #########\n\n\n #if Pint quantity, we need to create a dimensional version for reassign, add/ subtract\n if hasattr(_dict[arg], 'dimensionality'):\n #need to catch ints here\n val_ = float(val)* 1.*_dict[arg].units\n\n else:\n val_ = val\n\n #########\n #Update the given dictionary\n #########\n #in place multiply/divides use val (the dimensional quant)\n #addition, subtraction, ressign use val_ which may have units\n\n try:\n if '*=' in farg:\n _dict[arg] = _dict[arg]*val #multiply parameter by given factor\n elif '/=' in farg:\n _dict[arg] = _dict[arg]/float(val) #divide parameter by given value\n elif '+=' in farg:\n _dict[arg] = _dict[arg]+val_ #add to parameter given value\n elif '-=' in farg:\n _dict[arg] = _dict[arg]-val_ #subtract from parameter given value\n else:\n _dict[arg] = val_ #or reassign parameter by given value\n\n except:\n pass\n\n except:\n pass",
"def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='ouput JSON file', required=True)\n #adding debug argument for parsing\n parser.add_argument('-d', '--debug', help='debug level',\n default='0', choices=('0', '1', '2', '3'))\n\n return parser.parse_args()",
"def getCommandLineParams():\n\tdictionary = {}\n\tfor arg in sys.argv[1:]:\n\t\tchunks = string.split(arg, '=')\n\t\tif len(chunks) == 2:\n\t\t\tkey, value = chunks\n\t\t\tdictionary[key] = value\n\treturn dictionary",
"def parseArgs (args):\n result = {}\n \n for arg in args:\n try:\n (var, val) = string.split (arg, '=', 1)\n except:\n raise (SyntaxError, '%s is in the wrond format' % (arg))\n \n if (var[:2] != '--'):\n raise (SyntaxError, 'variable names must start with a ' +\n 'double dash (%s)' % (var))\n \n result[var[2:]] = val\n return (result)",
"def parseArgs(args):\n args_map = {}\n curkey = None\n for i in xrange(1, len(args)):\n if args[i][0] == '-':\n args_map[args[i]] = True\n curkey = args[i]\n else:\n assert curkey\n args_map[curkey] = args[i]\n curkey = None\n return args_map",
"def args_to_hparam_dict(args):\n hp_dict = {}\n for hpkey, hpvalue in args.__dict__.items():\n if not isinstance(hpvalue, (str, bool, int, float)):\n hp_dict[hpkey] = str(hpvalue)\n else:\n hp_dict[hpkey] = hpvalue\n return hp_dict",
"def parse_arguments():\n epilog = '''Currency codes according to http://www.xe.com/iso4217.php'''\n parser = argparse.ArgumentParser(description='Convert amount of one currency to another.',\n epilog=epilog)\n parser.add_argument('--amount', type=float, required=True,\n help='Amount which we want to convert - float')\n parser.add_argument('--input_currency', required=True,\n help='Input currency - 3 letters name or currency symbol')\n parser.add_argument('--output_currency',\n help='Requested/output currency - 3 letters name or currency symbol')\n args = parser.parse_args()\n amount = args.amount\n input_cur = args.input_currency\n output_cur = args.output_currency\n\n return amount, input_cur, output_cur",
"def convert_parser_arguments_to_key_value(parameters: List[str]) -> Dict[str, str]:\n if parameters is None:\n return {}\n\n d = {}\n\n for p in parameters:\n entry = p[0].split(\"=\")\n d[entry[0]] = entry[1]\n\n return d",
"def parse_options(arguments, initial_options=copy.copy(default_options), \r\n allowed_types=allowed_types):\r\n (options, args) = option_parser.parse_args(args=arguments)\r\n if (options.outtype == None and \r\n options.outfilename and \r\n '.' in options.outfilename) :\r\n basename, extension = os.path.splitext(options.outfilename)\r\n if extension[1:] in allowed_types:\r\n options.outtype = extension[1:]\r\n options.outfilename = basename\r\n options_dict = options.__dict__\r\n initial_options_dict = initial_options.__dict__\r\n \r\n return diff_dict(options_dict, initial_options_dict), args",
"def process_args():\n parser = get_args()\n args = vars(parser.parse_args())\n if args['stdin']:\n args['data'] = sys.stdin.read()\n args['data'] = json.loads(args['data'])\n return args",
"def parse_args_string(val: str) -> TypeInspectionsArgs:\n out = {}\n\n for chunk in val.split(';'):\n args = {}\n\n alias, _, argstr = chunk.strip().partition(':')\n argstr = argstr.strip()\n\n for arg in argstr.split(','):\n name, _, val = arg.partition('=')\n val = val.strip()\n\n if val:\n args[name.strip()] = val\n\n if args:\n out[alias.strip()] = args\n\n return out",
"def parse_arguments(self, arguments: List[str]) -> Dict[str, Any]:\n parameters = list(map(lambda x: x.name, self.params.values()))\n idx = 0\n result = dict()\n\n while idx < len(arguments):\n name, val, incr = read_param_pair(idx, arguments, parameters)\n idx += incr\n result[name] = val\n\n for (key, val) in result.items():\n if key in self.params.keys():\n annotation = self.params[key].annotation\n\n if annotation is not None:\n result[key] = annotation(val)\n\n return result",
"def parse(self, command_line) -> dict:\n raise NotImplementedError",
"def read_args():\n rtype_dict = {'P':'PP', 'R':'BR', 'S':'SP'}\n\n rindex = int(sys.argv[1])\n try:\n request_type = rtype_dict[sys.argv[2]]\n except (IndexError, KeyError):\n logging.warning(\"Missing or invalid request type. Default request type SP assumed.\")\n request_type = 'SP'\n\n return rindex, request_type",
"def taxon_cmd_line_checker(argv):\n tax_dict = {}\n if argv[0].endswith('.py'):\n argv = argv[1:]\n \n # need to have an even number of (integer, name) arguments\n try:\n pairs = [(argv[i], argv[i+1]) for i in range(0, len(argv), 2)]\n for (taxon, name) in pairs:\n for char in taxon:\n if not char.isdigit():\n break\n tax_dict[abs(int(taxon))] = name\n \n # print usage information if error in format\n except:\n print('\\n ### invalid command line argument format ###\\n')\n print(' arguments must be a series of \"taxonomy number\" \"name\" pairs')\n print(' where \"taxonomy number\" is integer and \"name\" is text string.')\n print(' example: prompt>python program.py 9606 human 4932 yeast\\n')\n \n # return dictionary, it will be empty if any errors encountered\n return tax_dict",
"def test_PluggableTransport_parseArgumentsIntoDict_valid_list(self):\n pt = bridges.PluggableTransport()\n args = pt._parseArgumentsIntoDict([\"sharedsecret=foobar\",\n \"publickey=1234\"])\n self.assertIsInstance(args, dict)\n self.assertItemsEqual(args, {\"sharedsecret\": \"foobar\",\n \"publickey\": \"1234\"})",
"def args_to_json(default_config: dict, preserve_args: tuple = (\"gpu\", \"save\")) -> Tuple[dict, list]:\n args = []\n config = default_config.copy()\n key, val = None, None\n for arg in sys.argv[1:]:\n if \"=\" in arg:\n key, val = arg.split(\"=\")\n elif key:\n val = arg\n else:\n key = arg\n if key and val:\n parsed_key = key.lstrip(\"-\").split(\".\")\n if parsed_key[0] in preserve_args:\n args.append(\"--{}={}\".format(parsed_key[0], val))\n else:\n nested = config\n for level in parsed_key[:-1]:\n nested[level] = config.get(level, {})\n nested = nested[level]\n try:\n # Convert numerics to floats / ints\n val = literal_eval(val)\n except ValueError:\n pass\n nested[parsed_key[-1]] = val\n key, val = None, None\n return config, args",
"def process_commandline_args(cls, args: argparse.Namespace):\n assert is_dataclass(cls)\n\n arg_dict = vars(args)\n kwargs = {}\n\n for fld in fields(cls):\n argument = arg_dict[fld.name]\n if issubclass(fld.type, Enum):\n argument = fld.type[argument]\n kwargs[fld.name] = argument\n\n settings = cls(**kwargs)\n return settings",
"def validate_args(kwdict, section, key, dtype, default=None):\n\n # The input has already been parsed from the config file using\n # ast.literal_eval, so the dictionary values should have recognisable\n # python types.\n\n if default is not None:\n val = kwdict[section].pop(key, default)\n else:\n val = kwdict[section][key]\n\n if dtype is str:\n try:\n val = str(val).rstrip('/ ')\n except UnicodeError as err: # Pretty much the only error str() can raise\n raise\n elif dtype is int:\n try:\n val = int(val)\n except ValueError as err:\n raise\n elif dtype is float:\n try:\n val = float(val)\n except ValueError as err:\n raise\n elif dtype is bool:\n try:\n val = bool(val)\n except ValueError as err:\n raise\n else:\n raise NotImplementedError('Only str, int, bool, and float are valid types.')\n\n return val"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a subscription to the NETCONF server. callback: user-defined callback function to be invoked when a notification arrives. errback: user-defined function to be invoked when an error occurs. manager: Manager object returned when the user connects to the NETCONF server, used to store connection info so ncclient can reconnect using that information (by default ncclient will not handle reconnecting to the NETCONF server if the user does not pass in a manager). retries: specifies the number of times ncclient will attempt to reconnect to VTS if the connection is dropped. delay: specifies the time ncclient will wait between consecutive attempts to reconnect to VTS following a dropped connection. stream: specifies the stream the user wants to receive notifications from (by default the NETCONF stream). filter: specifies the notifications the user wants to receive based on xml subtree structure and content (by default all notifications arrive). start_time: specifies the time the user wants to start receiving notifications (by default starts from the present time). stop_time: specifies the time the user wants to stop receiving notifications.
|
def request(self, callback, errback, manager=None, retries=20, delay=1,
stream=None, filter=None, start_time=None, stop_time=None):
if callback is None:
raise ValueError("Missing a callback function")
if errback is None:
raise ValueError("Missing a errback function")
subscription_node = etree.Element(qualify("create-subscription", NETCONF_NOTIFICATION_NS))
if stream is not None:
streamTag = etree.Element(qualify("stream"))
streamTag.text = stream
subscription_node.append(streamTag)
if filter is not None:
subscription_node.append(util.build_filter(filter))
if start_time is not None:
subscription_node.append(self.datetime_to_rfc("startTime", start_time))
if stop_time is not None:
subscription_node.append(self.datetime_to_rfc("stopTime", stop_time))
self.session.add_listener(NotificationListener(callback, errback,
manager=manager, retries=retries, delay=delay,
stream=stream, filter=filter, start_time=start_time, stop_time=stop_time))
return self._request(subscription_node)
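A hedged sketch of the callback and errback arguments a caller might supply (the function names and the surrounding ncclient wiring are assumptions, not taken from the snippet above):

def on_notification(notification):
    # invoked for each RFC 5277 notification delivered by the listener
    print("got notification:", notification)

def on_error(error):
    # invoked when the listener reports a dropped connection or a malformed message
    print("notification error:", error)

# hypothetical invocation on the operation object that owns request():
# reply = op.request(on_notification, on_error, stream="NETCONF", retries=20, delay=1)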
|
[
"def callback(self, root, raw):\n tag, attrs = root\n if tag != qualify(\"notification\", NETCONF_NOTIFICATION_NS):\n self.user_errback(NotificationError(\"Received a message not of type notification\"))\n return\n notification = Notification(raw)\n self.reconnect_time = notification.eventTime.replace(tzinfo=None) + timedelta.resolution\n\n # This might be unnecessary if callback is never invoked\n # when connection drops\n if notification.connected:\n self.user_callback(notification)\n else:\n self.user_errback(notification)",
"def client_callback1(now):\n print(\"Client callback function for service 1\")\n print(\"Message received: '%s' (%s)\" % (now, type(now)))",
"def on_connect(self, client, userdata, rc):\n print(\"Connected with result code: \" + str(rc))\n self.subscribe(\"orchestra/glock\")",
"def on_connect(self, client, userdata, flags, rc):\n try:\n for topic in self.topic_list:\n self.clientMqtt.subscribe(topic)\n self.logger.d(\"subscribe on \" + topic)\n for topic in self.topic_list_unsubscribe:\n self.clientMqtt.unsubscribe(topic)\n self.logger.d(\"unsubscribe on \" + topic)\n self.mqtt_connect_event.set()\n except Exception as e:\n import traceback\n exc_type, exc_obj, exc_tb = sys.exc_info()\n exceptionStr = (\n os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n + \", line \"\n + str(exc_tb.tb_lineno)\n + \" : \"\n + str(e) +\n \"\".join(traceback.format_tb(e.__traceback__))\n )\n self.logger.e(exceptionStr)",
"def __init_mqtt(self):\n\n def on_connect(client, userdata, flags, rc):\n \"\"\"Callback for when the connection is established with the mqtt broker\"\"\"\n try:\n logging.info('MQTT Paho Connected with result code ' + str(rc))\n self.flag_connected = True\n logging.info('Subscribing to invoke topic')\n client.subscribe(self.invoke_topic)\n client.subscribe(self.cloud_to_device_topic)\n\n\n except Exception as e:\n logging.warning(\"on_connect with result error %s\" % e)\n\n def on_message(client, userdata, msg):\n \"\"\"Callback for when a message is received by client\"\"\"\n logging.info('MQTT message arrived')\n print('MQTT message arrived')\n logging.debug('topic %s' % msg.topic)\n print('topic %s' % msg.topic)\n logging.debug('payload %s' % msg.payload)\n print('payload %s' % msg.payload)\n self.handle_mqtt_messages(msg.topic, msg.payload)\n\n def on_disconnect(client, userdata, rc):\n \"\"\"Callback for when the connection is lost\"\"\"\n self.flag_connected = False\n logging.info('MQTT Disconnected!!')\n\n self.paho_client_mqtt = mqtt.Client(client_id=self.device_id, protocol=self.broker_mqtt_protocol)\n self.paho_client_mqtt.on_connect = on_connect\n self.paho_client_mqtt.on_message = on_message\n self.paho_client_mqtt.on_disconnect = on_disconnect\n self.paho_client_mqtt.username_pw_set(username=self.username)\n self.paho_client_mqtt.tls_set(ca_certs=self.broker_mqtt_CACert,\n certfile=self.device_cert,\n keyfile=self.device_key,\n cert_reqs=ssl.CERT_REQUIRED,\n tls_version=ssl.PROTOCOL_TLSv1_2,\n ciphers=None)\n self.paho_client_mqtt.tls_insecure_set(True)",
"def configure_client():\n client.on_connect = on_connect\n client.on_disconnect = on_disconnect\n client.will_set('status/mqttc', payload=\"disconnected\", qos=1, retain=True)\n print('connecting')\n client.connect('broker', 1883, 60)",
"def addConnectionCallback(*args, **kwargs):\n \n pass",
"def addPreConnectionCallback(*args, **kwargs):\n \n pass",
"def on_connect(self, client, userdata, flags, retcode):\n refresh = \"{}/{}\".format(self.root_topic, REFRESH)\n self.log.info(\n \"Connected with client %s, userdata %s, flags %s, and \"\n \"result code %s. Subscribing to refresh command topic %s\",\n client,\n userdata,\n flags,\n retcode,\n refresh,\n )\n\n self.connected = True\n\n # Publish the ONLINE message to the LWT\n self._publish_mqtt(ONLINE, self.lwt, True)\n\n # Resubscribe on connection\n for reg in self.registered:\n self.log.info(\"on_connect: Resubscribing to %s\", reg)\n self.client.subscribe(reg)\n\n # causes sensors to republish their states\n self.msg_processor(\"MQTT connected\")",
"def reconnect_callback():\n return mock.MagicMock()",
"def getNotifications(nodeIdentifier, items):",
"def add_unconfirmed_listener(self, topics: List, callback: Callable):\n self.unconfirmed_callbacks[self.counter] = (topics, callback)\n self.counter += 1",
"def test_subscribe_callback_must_be_a_function(self):\n test_is_done = threading.Event()\n\n def started(client):\n \"\"\"started listener\"\"\"\n with pytest.raises(TypeError):\n client.subscribe('/foo1', 'share', {}, 7)\n callback = self.func004\n client.subscribe('/foo2', 'share', {}, callback)\n client.stop()\n test_is_done.set()\n client = mqlight.Client('amqp://host',\n 'test_subscribe_callback_must_be_a_function',\n on_started=started)\n test_is_done.wait(self.TEST_TIMEOUT)\n assert test_is_done.is_set()",
"def addConnectionFailedCallback(*args, **kwargs):\n \n pass",
"def client_callback2(val1, val2):\n\n print(\"Client callback function for service 2\")\n print(\"Values received: %s (%s) and %s (%s)\" % (val1, type(val1), val2, type(val2)))",
"def test_callback_server_params(params):\n params = Py4JComponent.configure_gateway(callback_server=params)[-3]\n assert params.address == '1.2.3.4'\n assert params.port == 5678",
"def recvNotificationCb(self, ntf):\n try:\n handler = self.hal_config_msg_handlers[ntf.msg.HalNotificationType]\n self.logger.info(\"Receive a interest notification message:\" + str(ntf.msg))\n\n if not isinstance(ntf, HalMessage):\n raise AttributeError(\"Invalid HAL message passed\")\n\n ntf = handler(ntf)\n if None is not ntf:\n self.send_cfg_msg(HalConfigMsg.MsgTypeVspAvpExchange, ntf.msg.HalNotificationPayLoad)\n else:\n self.logger.info(\"Notification message return is None\")\n except Exception as e:\n self.logger.error(\n \"Got an error:%s, the ntf msg:%s\", str(e), ntf.msg)",
"def on_connect(client, userdata, flags, rc):\n client.subscribe(mqtt_gps_topic)",
"def add_confirmed_listener(self, topics: List, callback: Callable):\n self.confirmed_callbacks[self.counter] = (topics, callback)\n self.counter += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called when a new RFC 5277 notification is received. The root argument allows the callback to determine whether the message is a notification. Here, root is a tuple of (tag, attributes) where tag is the qualified name of the root element and attributes is a dictionary of its attributes (also qualified names). raw will contain the xml notification as a string.
|
def callback(self, root, raw):
tag, attrs = root
if tag != qualify("notification", NETCONF_NOTIFICATION_NS):
self.user_errback(NotificationError("Received a message not of type notification"))
return
notification = Notification(raw)
self.reconnect_time = notification.eventTime.replace(tzinfo=None) + timedelta.resolution
# This might be unnecessary if callback is never invoked
# when connection drops
if notification.connected:
self.user_callback(notification)
else:
self.user_errback(notification)
|
[
"def data_received_callback(self, raw: bytes):\n if raw:\n try:\n knxipframe = KNXIPFrame(self.xknx)\n knxipframe.from_knx(raw)\n knx_logger.debug(\"Received: %s\", knxipframe)\n self.handle_knxipframe(knxipframe)\n except CouldNotParseKNXIP as couldnotparseknxip:\n logger.exception(couldnotparseknxip)",
"def _callback(msg):\n print('subscription message data: ', msg.data.decode('utf-8'))\n if msg.attributes:\n print('subscription message attributes:\\n')\n pprint(msg.attributes)\n msg.ack()",
"def cmd_NOTIFY(self, cmd, args):\n if len(args) != 5:\n self.wfile.write('501 Wrong number of arguments.\\r\\n')\n return\n\n try:\n length = int(args[0])\n uid = int(args[1])\n type = int(args[2])\n msgid = int(args[3])\n sticky = int(args[4])\n except ValueError:\n self.wfile.write('501 Invalid argument.\\r\\n')\n return\n\n if length > 0:\n data = self.rfile.read(length)\n else:\n data = None\n\n if uid == 0 and not self.server.check_perms('broadcast'):\n self.wfile.write('554 Broadcast permission denied.\\r\\n')\n return\n\n self.server.notify(uid, type, msgid, bool(sticky), data)\n self.wfile.write('200 Ok.\\r\\n')",
"def _build_notification(title, message, details, level):\n notification = ui.Notification(title, message, details, level)\n ui.NOTIFICATION_PANEL.notify(notification)",
"def recvNotificationCb(self, ntf):\n try:\n handler = self.hal_config_msg_handlers[ntf.msg.HalNotificationType]\n self.logger.info(\"Receive a interest notification message:\" + str(ntf.msg))\n\n if not isinstance(ntf, HalMessage):\n raise AttributeError(\"Invalid HAL message passed\")\n\n ntf = handler(ntf)\n if None is not ntf:\n self.send_cfg_msg(HalConfigMsg.MsgTypeVspAvpExchange, ntf.msg.HalNotificationPayLoad)\n else:\n self.logger.info(\"Notification message return is None\")\n except Exception as e:\n self.logger.error(\n \"Got an error:%s, the ntf msg:%s\", str(e), ntf.msg)",
"def notificationsWithUID(uid): # @NoSelf",
"def _on_new_message(self, message):\n message = message.decode('utf-8')\n msg_dict = self._parser.parse(message)\n if msg_dict is None:\n logging.debug('failed to parse:'+str(message))\n return\n\n msg_obj = Message(msg_dict['timestamp'], msg_dict['hostname'], msg_dict['content'])\n # FIXME: make tag & other stuff available too\n self.broadcast(msg_obj)",
"def test_notification_decorator_lreal(self):\n # type: () -> None\n\n @self.plc.notification(constants.PLCTYPE_LREAL)\n def callback(handle, name, timestamp, value):\n self.assertEqual(value, 1234.56789012345)\n\n notification = create_notification_struct(\n struct.pack(\"<d\", 1234.56789012345)\n )\n callback(pointer(notification), \"\")",
"def data_received_callback(self, raw: bytes) -> None:\n if self._buffer:\n raw = self._buffer + raw\n self._buffer = b\"\"\n if not raw:\n return\n try:\n knxipframe, next_frame_part = KNXIPFrame.from_knx(raw)\n except IncompleteKNXIPFrame:\n self._buffer = raw\n raw_socket_logger.debug(\n \"Incomplete KNX/IP frame. Waiting for rest: %s\", raw.hex()\n )\n return\n except CouldNotParseKNXIP as couldnotparseknxip:\n knx_logger.debug(\n \"Unsupported KNXIPFrame from %s: %s in %s\",\n self.remote_hpai,\n couldnotparseknxip.description,\n raw.hex(),\n )\n else:\n knx_logger.debug(\n \"Received from %s: %s\",\n self.remote_hpai,\n knxipframe,\n )\n self.handle_knxipframe(knxipframe, self.remote_hpai)\n # parse data after current KNX/IP frame\n if next_frame_part:\n self.data_received_callback(next_frame_part)",
"def _email_callback(self, raw_email_data: List[Any]) -> None:\n print(f'===new email!====')\n try:\n msg = utils.email_from_raw_data(raw_email_data)\n # Clone repo\n # TODO maybe it already exists\n repo, info = self._manager.clone_from_email(msg)\n if repo is None or info is None:\n raise utils.EmailToPrError('no repo URL key!')\n # Create patch file in repo directory\n patch_filename, title, body = patch.from_email(msg, info.path)\n # Apply git patch to new branch\n pr_branch, base_branch = self._manager.apply_patch(repo, patch_filename)\n # Push to remote\n self._manager.push(repo)\n # Create PR\n pr_info = github.PrInfo(\n self._manager.repo_user,\n info.name,\n base_branch,\n pr_branch,\n title,\n body)\n url = github.create_pr(self._manager.repo_token, pr_info)\n print(f'PR created: {url}')\n except utils.EmailToPrError as e:\n print(f'email2pr error: {e}')",
"def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None, extra_fields=None):\n if max_age is not None and not isinstance(max_age, int):\n raise ValueError(\"max_age must by of type: int\")\n data = (\n \"NOTIFY * HTTP/1.1\\r\\n\"\n \"HOST:{}\\r\\n\"\n \"NT:{}\\r\\n\"\n \"NTS:ssdp:alive\\r\\n\"\n \"USN:{}\\r\\n\"\n ).format(host, nt, usn)\n if location is not None:\n data += \"LOCATION:{}\\r\\n\".format(location)\n if al is not None:\n data += \"AL:{}\\r\\n\".format(al)\n if max_age is not None:\n data += \"Cache-Control:max-age={}\\r\\n\".format(max_age)\n if extra_fields is not None:\n for field, value in extra_fields.items():\n data += \"{}:{}\\r\\n\".format(field, value)\n data += \"\\r\\n\"\n return data.encode(\"utf-8\")",
"def test_notification_decorator_string(self):\n # type: () -> None\n\n @self.plc.notification(constants.PLCTYPE_STRING)\n def callback(handle, name, timestamp, value):\n self.assertEqual(value, \"Hello world!\")\n\n notification = create_notification_struct(b\"Hello world!\\x00\\x00\\x00\\x00\")\n callback(pointer(notification), \"\")",
"def new_notification(message):\r\n message_text = \"Пожалуйста, введите описание уведомления\"\r\n chat_id = message.chat.id\r\n BOT.send_message(chat_id, message_text)\r\n BOT.current_event[str(chat_id)] = \"NOTIF_GET_TITLE\"\r\n BOT.current_data[str(chat_id)] = notif.Notification()\r\n BOT.current_data[str(chat_id)].chat_id = chat_id",
"def getNotifications(nodeIdentifier, items):",
"def showEvent(self, event):\r\n super(Notification, self).showEvent(event)\r\n width, pgeo = self._parent.width(), self._parent.geometry()\r\n conditional_vertical = settings.NOTIFICATION_POSITION in (0, 1)\r\n conditional_horizontal = settings.NOTIFICATION_POSITION in (0, 2)\r\n x = pgeo.left() if conditional_horizontal else pgeo.right()\r\n y = (pgeo.bottom() - self.height()\r\n if conditional_vertical else pgeo.top())\r\n self.setFixedWidth(width)\r\n self.setGeometry(x, y, self.width(), self.height())\r\n background_color = str(settings.NOTIFICATION_COLOR)\r\n foreground_color = str(settings.NOTIFICATION_COLOR).lower().translate(\r\n maketrans('0123456789abcdef', 'fedcba9876543210'))\r\n self._root.setColor(background_color, foreground_color)\r\n self._root.start(self._duration)",
"def test_notify_new_reply_with_comment(self):\n review_request = self.create_review_request(\n create_repository=True,\n summary='Test Review Request',\n publish=True)\n\n review = self.create_review(review_request,\n user=self.user,\n publish=True)\n comment = self.create_general_comment(review, issue_opened=True)\n\n reply = self.create_reply(review, user=self.user, body_top='')\n self.create_general_comment(reply,\n text='This is a comment.',\n reply_to=comment)\n\n self._create_config()\n self.integration.enable_integration()\n\n self.spy_on(urlopen, call_original=False)\n self.spy_on(self.integration.notify)\n reply.publish()\n\n self.assertEqual(len(self.integration.notify.calls), 1)\n self.assertEqual(len(urlopen.spy.calls), 1)\n self.assertEqual(\n json.loads(urlopen.spy.calls[0].args[0].data),\n {\n 'body': '',\n 'msgtype': 'm.text',\n 'formatted_body':\n '<strong>#1: New reply from Test User: '\n 'http://example.com/r/1/#review2</strong><p>#1: '\n 'Test Review Request</p><blockquote>This '\n 'is a comment.</blockquote>',\n 'format': 'org.matrix.custom.html'\n })",
"def __init__(self, raw_data=None):\n super(PushMessage, self).__init__(raw_data)\n if raw_data is not None:\n self._build_from_raw(raw_data)\n else:\n self.created = datetime.datetime.now()\n self.operations = []",
"def render(self, notification):\n raise NotImplementedError()",
"def extract_notification_from_request(request):\n encoded = request.GET.get('notification') or request.POST.get('notification')\n\n if not encoded:\n return ('', '')\n\n try:\n notification = json.loads(base64.b64decode(encoded))\n except (json.JSONDecodeError, binascii.Error):\n notification = ('', '')\n\n return notification"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all of the Wishlists
|
def list_wishlists():
app.logger.info("Request for wishlists")
wishlists = []
category = request.args.get("category")
name = request.args.get("name")
if category:
wishlists = WishList.find_by_category(category)
elif name:
wishlists = WishList.find_by_name(name)
else:
wishlists = WishList.all()
# wishlists = WishList.all()
results = [wishlist.serialize() for wishlist in wishlists]
app.logger.info(f'Returning {len(results)}')
response_body = {
'data': results,
'count': len(results)
}
return make_response(jsonify(response_body), status.HTTP_200_OK)
|
[
"def retrieve_all_wishlists(self, include_deleted=False):\n\n all_wishlists = []\n\n if include_deleted:\n # use a list comprehension to easily retrieve the dictionaries and merge them together into a JSON string\n all_wishlists = [{key: contents} for key, contents in self._wishlist_resources.iteritems()]\n else:\n # filter out those key, contents pairs where contents['deleted'] == True \n all_wishlists = [{key: contents} for key, contents in self._wishlist_resources.iteritems() if contents['deleted'] == False]\n\n return json.dumps(all_wishlists, indent=4)",
"def load_wishlist():\n #records = wishlist.GetRecords(1,300)\n\n row_query = ListQuery()\n row_query.start_index = str(1)\n rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1])\n\n records = []\n\n for row in rows_feed.entry:\n records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=wishlist.id.text.split('/')[-1],\n row_entry=row,\n )\n )\n\n return [Item(r) for r in records]",
"def get_items(wishlist_id):\n app.logger.info('Request for items in wishlist')\n #item = Item()\n items = []\n\n name = request.args.get(\"name\")\n price = request.args.get(\"price\")\n\n if name: \n items = Item.find_by_name(name)\n elif price: \n items = Item.find_by_price(price)\n else: \n items = Item.all()\n\n #items = item.find_by_wishlist_id(wishlist_id)\n results = [item.serialize() for item in items] \n \n app.logger.info(f'Returning {len(results)} items')\n response_body = {\n \t'data': results,\n \t'count': len(results)\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def show_user_wishlists(user_id: UUID):\n user = users_repository.find_by_id(user_id)\n\n if not user:\n raise WishlistException(\n status_code=404,\n detail=f'User {user_id} does not exists'\n )\n\n wishlists = wishlists_repository.find_by_user_id(user_id)\n return [wishlist for wishlist in wishlists.dicts()]",
"def retrieve_all_items(self, wishlist_id=None):\n items_to_retrieve = {}\n\n if wishlist_id:\n if self._verify_wishlist_exists(wishlist_id):\n # collect all items from single wishlist\n items_to_retrieve[wishlist_id] = self._collect_items(wishlist_id)\n else:\n raise WishlistNotFoundException\n else:\n # collect all items from all wishlists\n for wishlist_key in self._wishlist_resources.keys():\n items_to_retrieve[wishlist_key] = self._collect_items(wishlist_key)\n\n return json.dumps(items_to_retrieve, indent=4)",
"def get_book_details_for_wishlist(self, wishlist_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT\n Books.title AS title, \n Books.coverImageUrl AS coverImageUrl,\n Books.author AS author,\n Books.ISBN AS ISBN,\n COUNT(UserBooks.id) AS numberAvailable,\n min(UserBooks.points) AS minPoints,\n WishlistsBooks.wishlistId AS wishlistId,\n Books.id AS bookId\n FROM \n WishlistsBooks \n INNER JOIN \n Books \n ON\n WishlistsBooks.bookId=Books.id \n INNER JOIN\n Wishlists\n ON\n WishlistsBooks.wishlistId = Wishlists.id\n LEFT JOIN\n UserBooks\n ON\n WishlistsBooks.bookId = UserBooks.bookId \n WHERE\n WishlistsBooks.wishlistId = ? \n AND\n (\n UserBooks.userId IS NULL OR\n (\n UserBooks.userId != Wishlists.userId\n AND\n UserBooks.available = 1\n )\n )\n GROUP BY\n WishlistsBooks.bookId\"\"\",\n (wishlist_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"Error getting books in wishlist {wishlist_id} -- {e}\")\n raise Exception\n return rows",
"def get_wishlist(wishlist_id):\n app.logger.info('Request for an wishlist')\n wl = WishList()\n found_wl = wl.find(wishlist_id)\n found_wl_serialized = found_wl.serialize()\n found_wl_id = str(found_wl_serialized['id'])\n app.logger.info(f'Returning item: {found_wl_id}')\n response_body = {\n 'data': found_wl_serialized,\n 'id': found_wl_id\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def show_wishlist(wishlist_id: UUID):\n wishlist = list_wishlist_service.run(wishlist_id)\n wishlist_dict = model_to_dict(wishlist)\n products_dict = [asdict(product) for product in wishlist.products]\n wishlist_dict['products'] = products_dict\n\n return wishlist_dict",
"def get_wishlists_by_userid(self, user_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT \n id\n FROM \n Wishlists \n WHERE \n userId = ?\"\"\",\n (user_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"{e}\")\n log.error(f\"Error getting wishlists for user {user_id}\")\n raise Exception\n return rows",
"def wishlist(request):\n wishlist = None\n try:\n wishlist = WishList.objects.get(user=request.user)\n except WishList.DoesNotExist:\n pass\n\n context = {\n 'wishlist': wishlist,\n }\n\n return render(request, 'wishlist/wishlist.html', context)",
"def get_wrestlers():\n results = []\n try:\n cursor.execute(\"\"\"SELECT * FROM wrestlers\"\"\")\n fieldnames = map(lambda x: x[0], cursor.description)\n for wrestler in cursor:\n # Compress with field names and turn into a dictionary\n wrestler = dict(zip(fieldnames, wrestler))\n results.append(wrestler)\n except sqlite3.Error as err:\n print \"ERROR in get_wrestlers: %s\" % err\n # Make sure we always return a list.\n return results",
"def site_wires(self):\n return []",
"def get_wedding_list():\n return jsonify(WeddingList.get_wedding_gifts(get_db()))",
"def get(self, request, *args, **kwargs):\n # get user wishlist queryset.\n user_wishlist = request.user.wishlist.all()\n serializer = ProductSerializer(user_wishlist, many=True, context={\"request\": self.request})\n return Response(serializer.data)",
"def fetchwikis( self ) :\n self.wikis = self.client.listwikipages( self )\n return self.wikis",
"def get_wish(user_id):\n\n return db.session.query(Answer).filter(Answer.wish.isnot(None)).filter_by(user_id=user_id).all()",
"async def list(self, ctx):\n keylist = []\n try:\n for key in data[ctx.message.server.id].keys():\n keylist.append(key)\n keylist = ', '.join(keylist)\n await self.Aya.say('Blacklisted words: \\n`' + keylist + '`')\n except KeyError:\n await self.Aya.say('You must add a word to the blacklist before invoking this command.')",
"def get_watchlists(self) -> list:\n try:\n result = self.api.get_watchlists()\n except BrokerException as err:\n print('[!] Unable to get watchlists.')\n raise err\n else:\n return result",
"def _collect_items(self, wishlist_id):\n\n items = self._wishlist_resources[wishlist_id]['items']\n formatted_items = [{'id': key, 'description': value['description']} for key, value in items.iteritems()]\n return formatted_items"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a wishlist by id
|
def get_wishlist(wishlist_id):
app.logger.info('Request for an wishlist')
wl = WishList()
found_wl = wl.find(wishlist_id)
found_wl_serialized = found_wl.serialize()
found_wl_id = str(found_wl_serialized['id'])
app.logger.info(f'Returning item: {found_wl_id}')
response_body = {
'data': found_wl_serialized,
'id': found_wl_id
}
return make_response(jsonify(response_body), status.HTTP_200_OK)
|
[
"def retrieve_wishlist(self, wishlist_id):\n\n if self._verify_wishlist_exists(wishlist_id):\n return json.dumps(self._wishlist_resources[wishlist_id], indent=4)\n else:\n raise WishlistNotFoundException",
"def show_wishlist(wishlist_id: UUID):\n wishlist = list_wishlist_service.run(wishlist_id)\n wishlist_dict = model_to_dict(wishlist)\n products_dict = [asdict(product) for product in wishlist.products]\n wishlist_dict['products'] = products_dict\n\n return wishlist_dict",
"def get_book_details_for_wishlist(self, wishlist_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT\n Books.title AS title, \n Books.coverImageUrl AS coverImageUrl,\n Books.author AS author,\n Books.ISBN AS ISBN,\n COUNT(UserBooks.id) AS numberAvailable,\n min(UserBooks.points) AS minPoints,\n WishlistsBooks.wishlistId AS wishlistId,\n Books.id AS bookId\n FROM \n WishlistsBooks \n INNER JOIN \n Books \n ON\n WishlistsBooks.bookId=Books.id \n INNER JOIN\n Wishlists\n ON\n WishlistsBooks.wishlistId = Wishlists.id\n LEFT JOIN\n UserBooks\n ON\n WishlistsBooks.bookId = UserBooks.bookId \n WHERE\n WishlistsBooks.wishlistId = ? \n AND\n (\n UserBooks.userId IS NULL OR\n (\n UserBooks.userId != Wishlists.userId\n AND\n UserBooks.available = 1\n )\n )\n GROUP BY\n WishlistsBooks.bookId\"\"\",\n (wishlist_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"Error getting books in wishlist {wishlist_id} -- {e}\")\n raise Exception\n return rows",
"def get_items(wishlist_id):\n app.logger.info('Request for items in wishlist')\n #item = Item()\n items = []\n\n name = request.args.get(\"name\")\n price = request.args.get(\"price\")\n\n if name: \n items = Item.find_by_name(name)\n elif price: \n items = Item.find_by_price(price)\n else: \n items = Item.all()\n\n #items = item.find_by_wishlist_id(wishlist_id)\n results = [item.serialize() for item in items] \n \n app.logger.info(f'Returning {len(results)} items')\n response_body = {\n \t'data': results,\n \t'count': len(results)\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def show_user_wishlists(user_id: UUID):\n user = users_repository.find_by_id(user_id)\n\n if not user:\n raise WishlistException(\n status_code=404,\n detail=f'User {user_id} does not exists'\n )\n\n wishlists = wishlists_repository.find_by_user_id(user_id)\n return [wishlist for wishlist in wishlists.dicts()]",
"def get_item(wishlist_id, item_id):\n app.logger.info('Request for an item in wishlist')\n item = Item()\n found_item = item.find(item_id)\n found_item_serialized = found_item.serialize()\n found_item_id = str(found_item_serialized['id'])\n app.logger.info(f'Returning item: {found_item_id}')\n response_body = {\n \t'data': found_item_serialized,\n \t'id': found_item_id\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def load_wishlist():\n #records = wishlist.GetRecords(1,300)\n\n row_query = ListQuery()\n row_query.start_index = str(1)\n rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1])\n\n records = []\n\n for row in rows_feed.entry:\n records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=wishlist.id.text.split('/')[-1],\n row_entry=row,\n )\n )\n\n return [Item(r) for r in records]",
"def retrieve_item(self, wishlist_id, item_id):\n desired_item = None\n\n if self._verify_wishlist_exists(wishlist_id):\n all_items = self._collect_items(wishlist_id)\n for item in all_items:\n if item.get('id') == item_id:\n desired_item = item\n else:\n raise WishlistNotFoundException\n \n if desired_item:\n return json.dumps(desired_item, indent=4)\n \n else:\n raise ItemNotFoundException",
"def get_wishlists_by_userid(self, user_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT \n id\n FROM \n Wishlists \n WHERE \n userId = ?\"\"\",\n (user_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"{e}\")\n log.error(f\"Error getting wishlists for user {user_id}\")\n raise Exception\n return rows",
"def list_wishlists():\n app.logger.info(\"Request for wishlists\")\n \n wishlists = []\n category = request.args.get(\"category\")\n name = request.args.get(\"name\")\n\n if category:\n wishlists = WishList.find_by_category(category)\n elif name:\n wishlists = WishList.find_by_name(name)\n else:\n wishlists = WishList.all()\n # wishlists = WishList.all()\n \n results = [wishlist.serialize() for wishlist in wishlists]\n app.logger.info(f'Returning {len(results)}')\n \n response_body = {\n \t'data': results,\n \t'count': len(results)\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def wishlist(request):\n wishlist = None\n try:\n wishlist = WishList.objects.get(user=request.user)\n except WishList.DoesNotExist:\n pass\n\n context = {\n 'wishlist': wishlist,\n }\n\n return render(request, 'wishlist/wishlist.html', context)",
"def update_wishlist_by_id(id, mail):\n row = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1], row_id=id)\n r = Record(content=None, row_entry=row,\n spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=wishlist.id.text.split('/')[-1], database_client=client)\n\n if r is not None:\n r.content['mail'] = mail\n r.content['date'] = datetime.now().strftime('%Y/%m/%d %H:%M')\n client.UpdateRow(row, r.content)\n return True\n\n return False",
"def update_wishlists(wishlist_id):\n app.logger.info(\"Request to update wishlist with id: %s\", wishlist_id)\n check_content_type(\"application/json\")\n wishlist = WishList.find(wishlist_id)\n if not wishlist:\n raise NotFound(\"Wishlist with id '{}' was not found.\".format(wishlist_id))\n\n wishlist.deserialize(request.get_json())\n wishlist.id = wishlist_id\n wishlist.save()\n return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK)",
"def add_to_wishlist(user_id):\n try:\n data = request.json\n wishlist = Wishlist(user_id=user_id, book_id=data.get('book_id'))\n db.session.add(wishlist)\n db.session.commit()\n return jsonify(message='Books added to wishlist', success=True, data={\"Book id\": data.get('book_id')})\n except Exception as e:\n logger.exception(e)\n return jsonify(message='Books not added to wishlist', success=False)",
"def delete_wishlists(wishlist_id):\n app.logger.info(f'Request to delete wishlist with id: {wishlist_id}')\n wishlist = WishList.find(wishlist_id)\n if wishlist:\n wishlist.delete()\n\n app.logger.info(f'Wishlist with ID [{wishlist_id}] delete complete.')\n return make_response(\"ITS GONE!\", status.HTTP_204_NO_CONTENT)",
"def get_wish(user_id):\n\n return db.session.query(Answer).filter(Answer.wish.isnot(None)).filter_by(user_id=user_id).all()",
"def retrieve_all_items(self, wishlist_id=None):\n items_to_retrieve = {}\n\n if wishlist_id:\n if self._verify_wishlist_exists(wishlist_id):\n # collect all items from single wishlist\n items_to_retrieve[wishlist_id] = self._collect_items(wishlist_id)\n else:\n raise WishlistNotFoundException\n else:\n # collect all items from all wishlists\n for wishlist_key in self._wishlist_resources.keys():\n items_to_retrieve[wishlist_key] = self._collect_items(wishlist_key)\n\n return json.dumps(items_to_retrieve, indent=4)",
"def _collect_items(self, wishlist_id):\n\n items = self._wishlist_resources[wishlist_id]['items']\n formatted_items = [{'id': key, 'description': value['description']} for key, value in items.iteritems()]\n return formatted_items",
"def getById(self, id):\n for item in self.list: \n if item.getId() == id:\n return item"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete a Wishlist. This endpoint will delete a wishlist based on the id specified in the path.
|
def delete_wishlists(wishlist_id):
app.logger.info(f'Request to delete wishlist with id: {wishlist_id}')
wishlist = WishList.find(wishlist_id)
if wishlist:
wishlist.delete()
app.logger.info(f'Wishlist with ID [{wishlist_id}] delete complete.')
return make_response("ITS GONE!", status.HTTP_204_NO_CONTENT)
|
[
"def delete_wishlist(wishlist_id: UUID):\n delete_wishlist_service.run(wishlist_id)\n return",
"def delete_wishlist(self, wishlist_id):\n\n try:\n # even if a delete wishlist call was already made, this will just set the value to True again\n self._wishlist_resources[wishlist_id]['deleted'] = True\n except KeyError:\n # cannot delete something that did not exist beforehand\n raise WishlistNotFoundException",
"def delete_product_wishlist(wishlist_id: UUID, product_id: str):\n delete_product_wishlist_service.run(\n wishlist_id=wishlist_id,\n product_id=product_id,\n )\n return",
"def deleteSessionInWishlist(self, request):\n return self._sessionWishlist(request, add_to_list=False)",
"def deleteSessionInWishlist(self, request):\n return self._wishListAddition(request, add=False)",
"def delete_wish(wish):\n\n deleted_wish = Answer.query.filter_by(wish=wish).delete()\n\n db.session.commit()",
"def remove_item(self, wishlist_id, item_id):\n\n if self._verify_wishlist_exists(wishlist_id):\n try:\n del self._wishlist_resources[wishlist_id]['items'][item_id]\n except KeyError:\n raise ItemNotFoundException\n else:\n # the wishlist does not exist or has been deleted\n raise WishlistNotFoundException",
"def delete_item(userid, itemid):\n \n # Seach for user whose wishlist we'd like to view\n user = db.session.query(UserProfile).filter_by(id=userid).first()\n # Check if user was found\n if user:\n # Get specified item\n wish = db.session.query(Wish).filter_by(item_id=itemid).first()\n # Check if item was found\n if wish:\n # Delete from DB\n db.session.delete(wish)\n # Save changes\n db.session.commit()\n # JSON\n err = False\n msg = \"Success\"\n userData = {'itemid': wish.item_id}\n else: # Wish not found\n # Error\n err = True\n msg = \"Wished Item not found\"\n userData = {}\n else: # User not found\n # Error\n err = True\n msg = \"User not found\"\n userData = None\n \n # Generate JSON object\n return jsonify(error=err, data=userData, message=msg)",
"def deleteSessionInWishlist(self, request):\n # Get the session key\n sessionKey = request.sessionKey\n # Get the session object\n session = ndb.Key(urlsafe=sessionKey).get()\n # Check that session exists or not\n if not session:\n raise endpoints.NotFoundException(\n 'No session found with key: %s' % sessionKey)\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get profile\n profile = self._getProfileFromUser()\n if not profile:\n raise endpoints.BadRequestException(\n 'Profile does not exist for user')\n # Check if key and Session match\n if not type(ndb.Key(urlsafe=sessionKey).get()) == Session:\n raise endpoints.NotFoundException(\n 'This key is not a Session instance')\n # Delete session from wishlist\n if sessionKey in profile.sessionKeysInWishlist:\n try:\n profile.sessionKeysInWishlist.remove(sessionKey)\n profile.put()\n except Exception:\n raise endpoints.InternalServerErrorException(\n 'Error in storing the wishlist')\n return BooleanMessage(data=True)",
"def get_wishlist(wishlist_id):\n app.logger.info('Request for an wishlist')\n wl = WishList()\n found_wl = wl.find(wishlist_id)\n found_wl_serialized = found_wl.serialize()\n found_wl_id = str(found_wl_serialized['id'])\n app.logger.info(f'Returning item: {found_wl_id}')\n response_body = {\n 'data': found_wl_serialized,\n 'id': found_wl_id\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def delete_list(list_id):\n #table names can't be parameterized, use until cascade gets set up properly in the database\n modify_db('DELETE FROM lists WHERE id = ?;', [list_id])\n modify_db('DELETE FROM posts WHERE list_id = ?;', [list_id])\n modify_db('DELETE FROM permissions WHERE list_id = ?;', [list_id])\n\n flash(\"list deleted!\")\n return redirect(url_for('home'))",
"def delete(request, wid):\n wdoc = find_by_id(wid)\n cl = wdoc.cl\n wdoc.delete()\n return HttpResponseRedirect(\n reverse('waybill.views.listing', args=[cl]))",
"def delete_list():\n list_data = json.loads(request.data)\n list_id = list_data['listId']\n list_to_delete = List.query.get(list_id)\n\n if list_to_delete:\n if list_to_delete.user_id == current_user.id:\n db.session.delete(list_to_delete)\n db.session.commit()\n\n return jsonify({})",
"def removeSessionFromWishlist(self, request):\r\n return self._sessionWishlist(request, add=False)",
"def update_wishlists(wishlist_id):\n app.logger.info(\"Request to update wishlist with id: %s\", wishlist_id)\n check_content_type(\"application/json\")\n wishlist = WishList.find(wishlist_id)\n if not wishlist:\n raise NotFound(\"Wishlist with id '{}' was not found.\".format(wishlist_id))\n\n wishlist.deserialize(request.get_json())\n wishlist.id = wishlist_id\n wishlist.save()\n return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK)",
"def delete_drinks(jwt_payload, id):\n drink = Drink.query.filter_by(id=id).first()\n if not drink:\n raise NotFound(\"drink\", id)\n\n try:\n drink.delete()\n return jsonify({\n \"success\": True,\n \"delete\": id,\n })\n except SQLAlchemyError:\n abort(500)",
"async def remove_from_watchlist(self, id_: str, watchlist: str = \"Default\") -> None:\n assert self._session is not None\n\n url = urls.WATCHLISTS / watchlist / f\"{id_}/\"\n\n try:\n async with self._session.delete(\n url,\n timeout=self._timeout,\n headers={\"Authorization\": self._access_token},\n ) as resp:\n response = await resp.json()\n if resp.status != 204:\n raise ClientAPIError(resp.method, resp.url, resp.status, response)\n except (aiohttp.ClientError, asyncio.TimeoutError) as e:\n raise ClientRequestError(\"DELETE\", url) from e",
"def delete_drink(*args, **kwargs):\n drink_id = kwargs['id']\n target_drink = Drink.query.get(drink_id)\n if not target_drink:\n abort(404)\n try:\n target_drink.delete()\n except Exception as e:\n print('EXCEPTION: ', str(e))\n abort(500)\n result = {\n \"success\": True,\n \"delete\": drink_id\n }\n return jsonify(result)",
"def delete_favourite():\n if request.method == \"POST\":\n user_id = mongo.db.users.find_one({\"username\": session[\"user\"]})[\"_id\"]\n favourite = request.form.get(\"wine_id\")\n\n mongo.db.users.update({\"_id\": ObjectId(user_id)}, {\"$pull\":\n {'favourites': {\"wine_id\": favourite}}})\n\n flash(\"Wine has now been removed from your favourites\")\n return redirect(url_for('profile'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update a Wishlist. This endpoint will update a Wishlist based on the body that is posted.
|
def update_wishlists(wishlist_id):
app.logger.info("Request to update wishlist with id: %s", wishlist_id)
check_content_type("application/json")
wishlist = WishList.find(wishlist_id)
if not wishlist:
raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
wishlist.deserialize(request.get_json())
wishlist.id = wishlist_id
wishlist.save()
return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK)
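An illustrative client call against this handler (the route path and payload field names are assumptions inferred from the surrounding wishlist handlers, not confirmed by the snippet):

import requests  # assumed to be available in the client environment

resp = requests.put(
    "http://localhost:5000/wishlists/1",             # hypothetical route for update_wishlists
    json={"name": "birthday", "category": "gifts"},  # field names assumed from the list_wishlists filters
)
assert resp.status_code == 200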
|
[
"def update_wishlist(wishlist_id: UUID, wishlist: WishlistUpdateRequestBody):\n updated_wishlist = update_wishlist_service.run(\n wishlist_id=wishlist_id,\n title=wishlist.title,\n description=wishlist.description,\n )\n return model_to_dict(updated_wishlist)",
"def update_watchlist():\n \n watchlist_id = request.args.get('watchlist')\n ticker = request.args.get('ticker')\n Watchlist.add_or_remove_stock(watchlist_id, ticker)\n \n \n return \"OK\"",
"def checklist_update(user, user_id, checklist_id):\n\n checklist_fields = checklist_schema.load(request.json)\n\n checklists = Checklist.query.filter_by(id=checklist_id)\n\n if checklists[0].owner_id != user.id:\n return abort(401, description=\"You do not have permission to update this checklist.\")\n\n if checklists.count() != 1:\n return abort(404, description=\"Checklist not found.\")\n\n checklists.update(checklist_fields)\n db.session.commit()\n\n return jsonify(checklist_schema.dump(checklists[0]))",
"def UpdateShoppingList(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def update_wishlist_by_id(id, mail):\n row = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1], row_id=id)\n r = Record(content=None, row_entry=row,\n spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=wishlist.id.text.split('/')[-1], database_client=client)\n\n if r is not None:\n r.content['mail'] = mail\n r.content['date'] = datetime.now().strftime('%Y/%m/%d %H:%M')\n client.UpdateRow(row, r.content)\n return True\n\n return False",
"def update_lists(self, list_id, extra=None):\n data = {}\n\n if extra:\n data.update(**extra)\n\n return self.post(\n \"{}/{}\".format(\n ENDPOINT[\"lists\"],\n list_id,\n ),\n data=data\n )",
"def put(self,id,item_id):\n\n data = request.get_json(force = True)\n if get_single_bucketlist(id,g.user.id) == \"Bucketlist doesn't exist\":\n raise NotFound(\"Bucketlist doesn't exist\")\n elif get_single_bucketlist_item(id,item_id) == \"Item doesn't exist\":\n raise NotFound(\"Item does not exist\")\n update_bucket_list_item(id,item_id,data)\n return {\"message\":\"BucketList item successfully updated\"},200",
"def add_to_wishlist(user_id):\n try:\n data = request.json\n wishlist = Wishlist(user_id=user_id, book_id=data.get('book_id'))\n db.session.add(wishlist)\n db.session.commit()\n return jsonify(message='Books added to wishlist', success=True, data={\"Book id\": data.get('book_id')})\n except Exception as e:\n logger.exception(e)\n return jsonify(message='Books not added to wishlist', success=False)",
"def put(self,id):\n if get_single_bucketlist(id,g.user.id) == \"Bucketlist doesn't exist\":\n raise NotFound(\"Bucketlist doesn't exist\")\n data = request.get_json(force = True)\n update_bucket_list(id,g.user.id,data)\n return {\"message\":\"BucketList successfully updated\"},200",
"def update(self, board_id, repo_id, list_id, trello_member_id=None):\n subscribed_list = SubscribedList.query.get(\n [board_id, repo_id, list_id]\n )\n subscribed_list.trello_member_id = trello_member_id\n\n # Persist the changes\n db.session.commit()",
"def test_api_course_wish_update_success(self):\n course = factories.CourseFactory()\n user = factories.UserFactory()\n token = self.get_user_token(user.username)\n models.CourseWish.objects.create(course=course, owner=user)\n\n response = self.client.put(\n f\"/api/v1.0/courses/{course.id}/wish/\",\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {token}\",\n )\n\n self.assertEqual(response.status_code, 405)\n self.assertEqual(response.json(), {\"detail\": 'Method \"PUT\" not allowed.'})",
"def update_list(list_id):\n db_connection = connect_to_database()\n\n # display current data\n if request.method == 'GET':\n cursor = db_connection.cursor()\n cursor.callproc('getList', [list_id, ])\n rtn = cursor.fetchall()\n cursor.close()\n context = {'list_id': rtn[0][0], 'list_name': rtn[0][2], 'list_desc': rtn[0][3]}\n db_connection.close() # close connection before returning\n return render_template('update_list.html', context=context)\n elif request.method == 'POST':\n cursor = db_connection.cursor()\n cursor.callproc('updateList', [request.form['list_name'], request.form['list_desc'], list_id, ])\n db_connection.commit()\n cursor.close()\n db_connection.close() # close connection before returning\n webapp.logger.info(\"Update list. userid: %s, list_id: %s, list_name: %s, list_desc: %s\", current_user.id, list_id, request.form['list_name'], request.form['list_desc'])\n return redirect('/home')",
"def edit_shopping_list(shoppingid, listname, duedate):\n db.execute(\"UPDATE shopping SET listname = :listname, duedate = :duedate WHERE shoppingid = :shoppingid\",\n listname=listname, duedate=duedate, shoppingid=shoppingid)",
"def sale_list_update_by_id(current_user, sale_list_id):\n sale_list = SaleList.query.filter_by(id=sale_list_id).first()\n data = request.get_json()\n\n try:\n if data[\"name\"]:\n sale_list.name = data[\"name\"]\n\n if data[\"customer_name\"]:\n sale_list.customer_name = data[\"customer_name\"]\n\n if data[\"customer_contact\"]:\n sale_list.customer_contact = data[\"customer_contact\"]\n\n except KeyError:\n return jsonify({\"message\": \"Wrong data passed\"})\n\n db.sesion.commit()\n return jsonify({\"message\": \"Product Sale list updated sucessfully\"}), 200",
"def update_a_list(uuid: str, list):\n with get_db_cursor() as cursor:\n cursor.execute(\"UPDATE list SET box_id = %s, item_name= %s, item_id = %s WHERE id = %s\", [list['box_id'], list['item_name'], list['item_id'], uuid])",
"def add_to_wishlist(request, product_id):\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n product = Product.objects.get(pk=product_id)\n if request.POST:\n test = WishlistItem.objects.filter(\n wishlist=wishlist_user, product=product).exists()\n if test:\n messages.error(request, \"You already wish you had these socks\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(\n wishlist=wishlist_user,\n product=product, date_added=timezone.now())\n added_item.save()\n messages.success(request, \"Socks added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')",
"def get_wishlist(wishlist_id):\n app.logger.info('Request for an wishlist')\n wl = WishList()\n found_wl = wl.find(wishlist_id)\n found_wl_serialized = found_wl.serialize()\n found_wl_id = str(found_wl_serialized['id'])\n app.logger.info(f'Returning item: {found_wl_id}')\n response_body = {\n 'data': found_wl_serialized,\n 'id': found_wl_id\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def add_item(self, wishlist_id, item_data):\n\n item_id = item_data.get('id')\n item_description = item_data.get('description')\n\n if self._verify_wishlist_exists(wishlist_id):\n if item_id in self._wishlist_resources[wishlist_id]['items']:\n # one cannot add an item that already exists\n # note: although it would not be an issue to merely overwrite the data,\n # that would not be a proper result of a POST request to add an item\n raise WishlistOperationNotPermittedException\n else:\n # add a new item\n self._wishlist_resources[wishlist_id]['items'][item_id] = {'description': item_description}\n return json.dumps({'id': item_id, 'description': item_description}, indent=4)\n else:\n raise WishlistNotFoundException",
"def show_wishlist(wishlist_id: UUID):\n wishlist = list_wishlist_service.run(wishlist_id)\n wishlist_dict = model_to_dict(wishlist)\n products_dict = [asdict(product) for product in wishlist.products]\n wishlist_dict['products'] = products_dict\n\n return wishlist_dict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all of the items in a wishlist
|
def get_items(wishlist_id):
app.logger.info('Request for items in wishlist')
#item = Item()
items = []
name = request.args.get("name")
price = request.args.get("price")
if name:
items = Item.find_by_name(name)
elif price:
items = Item.find_by_price(price)
else:
items = Item.all()
#items = item.find_by_wishlist_id(wishlist_id)
results = [item.serialize() for item in items]
app.logger.info(f'Returning {len(results)} items')
response_body = {
'data': results,
'count': len(results)
}
return make_response(jsonify(response_body), status.HTTP_200_OK)
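# Hypothetical request sketch (not part of the retrieved snippet); assumes the handler is
# registered on the same Flask `app` as GET /wishlists/<int:wishlist_id>/items (route
# decorator not shown). The query parameters mirror the name/price filters read above.
with app.test_client() as client:
    resp = client.get("/wishlists/1/items", query_string={"name": "socks"})
    body = resp.get_json()
    print(body["count"], body["data"])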
|
[
"def load_wishlist():\n #records = wishlist.GetRecords(1,300)\n\n row_query = ListQuery()\n row_query.start_index = str(1)\n rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1])\n\n records = []\n\n for row in rows_feed.entry:\n records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=wishlist.id.text.split('/')[-1],\n row_entry=row,\n )\n )\n\n return [Item(r) for r in records]",
"def list_wishlists():\n app.logger.info(\"Request for wishlists\")\n \n wishlists = []\n category = request.args.get(\"category\")\n name = request.args.get(\"name\")\n\n if category:\n wishlists = WishList.find_by_category(category)\n elif name:\n wishlists = WishList.find_by_name(name)\n else:\n wishlists = WishList.all()\n # wishlists = WishList.all()\n \n results = [wishlist.serialize() for wishlist in wishlists]\n app.logger.info(f'Returning {len(results)}')\n \n response_body = {\n \t'data': results,\n \t'count': len(results)\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def retrieve_all_items(self, wishlist_id=None):\n items_to_retrieve = {}\n\n if wishlist_id:\n if self._verify_wishlist_exists(wishlist_id):\n # collect all items from single wishlist\n items_to_retrieve[wishlist_id] = self._collect_items(wishlist_id)\n else:\n raise WishlistNotFoundException\n else:\n # collect all items from all wishlists\n for wishlist_key in self._wishlist_resources.keys():\n items_to_retrieve[wishlist_key] = self._collect_items(wishlist_key)\n\n return json.dumps(items_to_retrieve, indent=4)",
"def _collect_items(self, wishlist_id):\n\n items = self._wishlist_resources[wishlist_id]['items']\n formatted_items = [{'id': key, 'description': value['description']} for key, value in items.iteritems()]\n return formatted_items",
"def retrieve_all_wishlists(self, include_deleted=False):\n\n all_wishlists = []\n\n if include_deleted:\n # use a list comprehension to easily retrieve the dictionaries and merge them together into a JSON string\n all_wishlists = [{key: contents} for key, contents in self._wishlist_resources.iteritems()]\n else:\n # filter out those key, contents pairs where contents['deleted'] == True \n all_wishlists = [{key: contents} for key, contents in self._wishlist_resources.iteritems() if contents['deleted'] == False]\n\n return json.dumps(all_wishlists, indent=4)",
"def show_user_wishlists(user_id: UUID):\n user = users_repository.find_by_id(user_id)\n\n if not user:\n raise WishlistException(\n status_code=404,\n detail=f'User {user_id} does not exists'\n )\n\n wishlists = wishlists_repository.find_by_user_id(user_id)\n return [wishlist for wishlist in wishlists.dicts()]",
"def get_book_details_for_wishlist(self, wishlist_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT\n Books.title AS title, \n Books.coverImageUrl AS coverImageUrl,\n Books.author AS author,\n Books.ISBN AS ISBN,\n COUNT(UserBooks.id) AS numberAvailable,\n min(UserBooks.points) AS minPoints,\n WishlistsBooks.wishlistId AS wishlistId,\n Books.id AS bookId\n FROM \n WishlistsBooks \n INNER JOIN \n Books \n ON\n WishlistsBooks.bookId=Books.id \n INNER JOIN\n Wishlists\n ON\n WishlistsBooks.wishlistId = Wishlists.id\n LEFT JOIN\n UserBooks\n ON\n WishlistsBooks.bookId = UserBooks.bookId \n WHERE\n WishlistsBooks.wishlistId = ? \n AND\n (\n UserBooks.userId IS NULL OR\n (\n UserBooks.userId != Wishlists.userId\n AND\n UserBooks.available = 1\n )\n )\n GROUP BY\n WishlistsBooks.bookId\"\"\",\n (wishlist_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"Error getting books in wishlist {wishlist_id} -- {e}\")\n raise Exception\n return rows",
"def get_wish(user_id):\n\n return db.session.query(Answer).filter(Answer.wish.isnot(None)).filter_by(user_id=user_id).all()",
"def get_wishlist(wishlist_id):\n app.logger.info('Request for an wishlist')\n wl = WishList()\n found_wl = wl.find(wishlist_id)\n found_wl_serialized = found_wl.serialize()\n found_wl_id = str(found_wl_serialized['id'])\n app.logger.info(f'Returning item: {found_wl_id}')\n response_body = {\n 'data': found_wl_serialized,\n 'id': found_wl_id\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def get_all(self):\n return [self.item]",
"def show_wishlist(wishlist_id: UUID):\n wishlist = list_wishlist_service.run(wishlist_id)\n wishlist_dict = model_to_dict(wishlist)\n products_dict = [asdict(product) for product in wishlist.products]\n wishlist_dict['products'] = products_dict\n\n return wishlist_dict",
"def get_all_items(self):\n\t\treturn self.items.values()",
"def search_all_items(self, keyword, user_id ,include_deleted = False):\n desired_products = {}\n desired_wishlists = []\n if include_deleted:\n desired_wishlists = [{key: contents} for key, contents in self._wishlist_resources.iteritems() if contents['user_id']==user_id]\n else:\n desired_wishlists = [{key: contents} for key, contents in self._wishlist_resources.iteritems() if contents['deleted'] == False and contents['user_id'] == user_id]\n for wishlist in desired_wishlists:\n for k,v in wishlist.iteritems():\n for key in v['items'].keys():\n if keyword in key:\n desired_products[key] = v['items'][key]\n for nested_key,nested_value in v['items'].iteritems():\n if keyword in nested_value['description']:\n if nested_key not in desired_products:\n desired_products[nested_key] = v['items'][nested_key]\n if desired_products:\n return json.dumps(desired_products, indent=4)\n else: \n raise ItemNotFoundException",
"def get_wishlists_by_userid(self, user_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT \n id\n FROM \n Wishlists \n WHERE \n userId = ?\"\"\",\n (user_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"{e}\")\n log.error(f\"Error getting wishlists for user {user_id}\")\n raise Exception\n return rows",
"def wishlist(request):\n wishlist = None\n try:\n wishlist = WishList.objects.get(user=request.user)\n except WishList.DoesNotExist:\n pass\n\n context = {\n 'wishlist': wishlist,\n }\n\n return render(request, 'wishlist/wishlist.html', context)",
"def showAllItems():\n\n items = readAllItems()\n return render_template('show_all_items.html', items=items)",
"def get_suggesteditems(self):\n suggested_items = super(ListItemView, self).get_queryset().filter(approved=True)\n return suggested_items",
"def get_test_items():\n out = []\n out.append(Item('potion','potion_base'))\n out.append(Item('gold','gold',))\n out.append(Item('sword','sword_a',allow_stack=False))\n return out[0],out[1],out[2]",
"def fruits_list():\r\n\r\n fruits_meta = client_raw.commodities_meta.find({\"Commodity_Type\":\"fruits\"})\r\n vegetables_meta = client_raw.commodities_meta.find({\"Commodity_Type\":\"vegetables\"})\r\n herbs_meta = client_raw.commodities_meta.find({\"Commodity_Type\":\"herbs\"})\r\n ornamentals_meta = client_raw.commodities_meta.find({\"Commodity_Type\":\"ornamentals\"})\r\n nuts_meta = client_raw.commodities_meta.find({\"Commodity_Type\":\"nuts\"})\r\n return render_template('admin_commodities_meta.html', result_items=fruits_meta,total_fruits=fruits_meta.count(),\r\n result_items1=vegetables_meta,total_vegetables=vegetables_meta.count(),\r\n result_items2=herbs_meta,total_herbs=herbs_meta.count(),\r\n result_items3=ornamentals_meta,total_ornamentals=ornamentals_meta.count(),\r\n result_items4=nuts_meta, total_nuts=nuts_meta.count())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns one item in a wishlist
|
def get_item(wishlist_id, item_id):
app.logger.info('Request for an item in wishlist')
item = Item()
found_item = item.find(item_id)
found_item_serialized = found_item.serialize()
found_item_id = str(found_item_serialized['id'])
app.logger.info(f'Returning item: {found_item_id}')
response_body = {
'data': found_item_serialized,
'id': found_item_id
}
return make_response(jsonify(response_body), status.HTTP_200_OK)
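# Hypothetical request sketch (not part of the retrieved snippet); assumes registration on
# the same Flask `app` as GET /wishlists/<int:wishlist_id>/items/<int:item_id> (route
# decorator not shown) and an existing item with id 42.
with app.test_client() as client:
    resp = client.get("/wishlists/1/items/42")
    print(resp.status_code, resp.get_json())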
|
[
"def get_wishlist(wishlist_id):\n app.logger.info('Request for an wishlist')\n wl = WishList()\n found_wl = wl.find(wishlist_id)\n found_wl_serialized = found_wl.serialize()\n found_wl_id = str(found_wl_serialized['id'])\n app.logger.info(f'Returning item: {found_wl_id}')\n response_body = {\n 'data': found_wl_serialized,\n 'id': found_wl_id\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def retrieve_item(self, wishlist_id, item_id):\n desired_item = None\n\n if self._verify_wishlist_exists(wishlist_id):\n all_items = self._collect_items(wishlist_id)\n for item in all_items:\n if item.get('id') == item_id:\n desired_item = item\n else:\n raise WishlistNotFoundException\n \n if desired_item:\n return json.dumps(desired_item, indent=4)\n \n else:\n raise ItemNotFoundException",
"def wishlist(request):\n wishlist = None\n try:\n wishlist = WishList.objects.get(user=request.user)\n except WishList.DoesNotExist:\n pass\n\n context = {\n 'wishlist': wishlist,\n }\n\n return render(request, 'wishlist/wishlist.html', context)",
"def retrieve_wishlist(self, wishlist_id):\n\n if self._verify_wishlist_exists(wishlist_id):\n return json.dumps(self._wishlist_resources[wishlist_id], indent=4)\n else:\n raise WishlistNotFoundException",
"def load_wishlist():\n #records = wishlist.GetRecords(1,300)\n\n row_query = ListQuery()\n row_query.start_index = str(1)\n rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=wishlist.id.text.split('/')[-1])\n\n records = []\n\n for row in rows_feed.entry:\n records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=wishlist.id.text.split('/')[-1],\n row_entry=row,\n )\n )\n\n return [Item(r) for r in records]",
"def find_drink(self, order_name):\r\n for item in self.menu:\r\n if item.code == order_name:\r\n return item\r\n print(\"Sorry that item is not available.\")\r\n return False",
"def get_items(wishlist_id):\n app.logger.info('Request for items in wishlist')\n #item = Item()\n items = []\n\n name = request.args.get(\"name\")\n price = request.args.get(\"price\")\n\n if name: \n items = Item.find_by_name(name)\n elif price: \n items = Item.find_by_price(price)\n else: \n items = Item.all()\n\n #items = item.find_by_wishlist_id(wishlist_id)\n results = [item.serialize() for item in items] \n \n app.logger.info(f'Returning {len(results)} items')\n response_body = {\n \t'data': results,\n \t'count': len(results)\n }\n return make_response(jsonify(response_body), status.HTTP_200_OK)",
"def __getitem__(self, item) -> HuntPilot:\n return self.by_hunt_pilot[item][0]",
"def get_item(menu, item):\n for i in menu['items']:\n if i['id'] == item:\n return i",
"def show_wishlist(wishlist_id: UUID):\n wishlist = list_wishlist_service.run(wishlist_id)\n wishlist_dict = model_to_dict(wishlist)\n products_dict = [asdict(product) for product in wishlist.products]\n wishlist_dict['products'] = products_dict\n\n return wishlist_dict",
"def get_book_details_for_wishlist(self, wishlist_id):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT\n Books.title AS title, \n Books.coverImageUrl AS coverImageUrl,\n Books.author AS author,\n Books.ISBN AS ISBN,\n COUNT(UserBooks.id) AS numberAvailable,\n min(UserBooks.points) AS minPoints,\n WishlistsBooks.wishlistId AS wishlistId,\n Books.id AS bookId\n FROM \n WishlistsBooks \n INNER JOIN \n Books \n ON\n WishlistsBooks.bookId=Books.id \n INNER JOIN\n Wishlists\n ON\n WishlistsBooks.wishlistId = Wishlists.id\n LEFT JOIN\n UserBooks\n ON\n WishlistsBooks.bookId = UserBooks.bookId \n WHERE\n WishlistsBooks.wishlistId = ? \n AND\n (\n UserBooks.userId IS NULL OR\n (\n UserBooks.userId != Wishlists.userId\n AND\n UserBooks.available = 1\n )\n )\n GROUP BY\n WishlistsBooks.bookId\"\"\",\n (wishlist_id,))\n rows = c.fetchall()\n except sqlite3.Error as e:\n log.error(f\"Error getting books in wishlist {wishlist_id} -- {e}\")\n raise Exception\n return rows",
"def getSpecificItem(itemName):\r\n return session.query(Item).filter_by(name=itemName).one()",
"def get_wish(user_id):\n\n return db.session.query(Answer).filter(Answer.wish.isnot(None)).filter_by(user_id=user_id).all()",
"def get_item(name):\n for item in globals().values():\n if isinstance(item, MarketItem) and item.name == name:\n return item\n\n raise Exception(\"Invaid item '{}'\".format(name))",
"def first_item(self):\n ...",
"def add_to_wishlist(request, product_id):\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n product = Product.objects.get(pk=product_id)\n if request.POST:\n test = WishlistItem.objects.filter(\n wishlist=wishlist_user, product=product).exists()\n if test:\n messages.error(request, \"You already wish you had these socks\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(\n wishlist=wishlist_user,\n product=product, date_added=timezone.now())\n added_item.save()\n messages.success(request, \"Socks added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')",
"def show_user_wishlists(user_id: UUID):\n user = users_repository.find_by_id(user_id)\n\n if not user:\n raise WishlistException(\n status_code=404,\n detail=f'User {user_id} does not exists'\n )\n\n wishlists = wishlists_repository.find_by_user_id(user_id)\n return [wishlist for wishlist in wishlists.dicts()]",
"def get_item(self, item_name):\n if len(self.items) > 0: # if there is at least one item in that location\n for element in self.items:\n if element.get_name() == item_name:\n return element\n return False\n else:\n return False",
"async def wear_item(self, item_name: str):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates an item in a wishlist
|
def create_item(wishlist_id):
app.logger.info("Request to create an item in a wishlist")
check_content_type("application/json")
item = Item()
item.deserialize(request.get_json())
item.create()
message = item.serialize()
location_url = url_for("get_item", wishlist_id=item.wishlist_id, item_id=item.id, _external=True)
app.logger.info(f'Item with ID {item.id} created')
return make_response(
jsonify(message), status.HTTP_201_CREATED, {"Location": location_url}
)
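# Hypothetical request sketch (not part of the retrieved snippet); assumes registration on
# the same Flask `app` as POST /wishlists/<int:wishlist_id>/items (route decorator not
# shown), and the payload field names are assumptions about what Item.deserialize accepts.
with app.test_client() as client:
    resp = client.post("/wishlists/1/items", json={"wishlist_id": 1, "name": "socks", "price": 4.99})
    print(resp.status_code, resp.headers.get("Location"))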
|
[
"def create_wishlist(user_id: UUID, wishlist: WishlistRequestBody):\n new_wishlist = create_wishlist_service.run(\n user_id=user_id,\n title=wishlist.title,\n description=wishlist.description,\n )\n return model_to_dict(new_wishlist)",
"def create_wishlist(self, name, user_id):\n new_wishlist = {}\n new_wishlist['name'] = name\n new_wishlist['user_id'] = str(user_id)\n new_wishlist['items'] = {}\n # get a nicely formatted, human-readable datetime\n new_wishlist['created'] = str(datetime.utcnow())\n new_wishlist['deleted'] = False\n self._index += 1\n new_wishlist['id'] = self._index\n self._wishlist_resources[self._index] = new_wishlist\n\n return json.dumps(new_wishlist, indent=4)",
"def create_wish(self, _title, _level):\n\n wish = Wishes(creator=self.id, title=_title, creation_date=datetime.datetime.now(), level=_level)\n\n splitTitle = _title.split(' ')\n for word in splitTitle:\n if word.startswith('#'):\n tag = Tags.objects.get_or_create(title=word)\n wish.tags.append(tag[0])\n\n wish.save()\n\n return wish",
"def add_item(self, wishlist_id, item_data):\n\n item_id = item_data.get('id')\n item_description = item_data.get('description')\n\n if self._verify_wishlist_exists(wishlist_id):\n if item_id in self._wishlist_resources[wishlist_id]['items']:\n # one cannot add an item that already exists\n # note: although it would not be an issue to merely overwrite the data,\n # that would not be a proper result of a POST request to add an item\n raise WishlistOperationNotPermittedException\n else:\n # add a new item\n self._wishlist_resources[wishlist_id]['items'][item_id] = {'description': item_description}\n return json.dumps({'id': item_id, 'description': item_description}, indent=4)\n else:\n raise WishlistNotFoundException",
"def add_to_wishlist(user_id):\n try:\n data = request.json\n wishlist = Wishlist(user_id=user_id, book_id=data.get('book_id'))\n db.session.add(wishlist)\n db.session.commit()\n return jsonify(message='Books added to wishlist', success=True, data={\"Book id\": data.get('book_id')})\n except Exception as e:\n logger.exception(e)\n return jsonify(message='Books not added to wishlist', success=False)",
"def create_item(self) -> pywikibot.ItemPage:\n data = {\n 'sitelinks': {\n self.site.dbName(): {\n 'site': self.site.dbName(),\n 'title': self.current_page.title()\n }\n },\n 'labels': {\n self.site.lang: {\n 'language': self.site.lang,\n 'value': self.current_page.title()\n }\n }\n }\n for site, page in self.iwlangs.items():\n if not page.exists():\n continue\n dbname = site.dbName()\n title = page.title()\n data['sitelinks'][dbname] = {'site': dbname, 'title': title}\n data['labels'][site.lang] = {'language': site.lang, 'value': title}\n summary = ('Bot: New item with sitelink(s) from '\n + self.current_page.title(as_link=True, insite=self.repo))\n\n item = pywikibot.ItemPage(self.repo)\n item.editEntity(data, new='item', summary=summary)\n info(f'Created item {item.getID()}')\n return item",
"def add_to_wishlist(request, product_id):\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n product = Product.objects.get(pk=product_id)\n if request.POST:\n test = WishlistItem.objects.filter(\n wishlist=wishlist_user, product=product).exists()\n if test:\n messages.error(request, \"You already wish you had these socks\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(\n wishlist=wishlist_user,\n product=product, date_added=timezone.now())\n added_item.save()\n messages.success(request, \"Socks added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')",
"def add_item(self, item):",
"def add_to_wishlist(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n\n # Create a wishlist for the user if they don't have one\n wishlist, _ = WishList.objects.get_or_create(user=request.user)\n # Add product to the wishlist\n wishlist.products.add(product)\n messages.info(request, \"A new product was added to your wishlist\")\n\n return redirect(request.META.get('HTTP_REFERER'))",
"def test_user_add_item_to_list(self):\n self.user.create_list(\"joybirthday\")\n self.user.add_item('joybirthday', 'cake','3000')\n self.assertEqual(self.user.slist[-1].items[-1].name, 'cake')",
"def create(self):\n data = {\"labels\": {\"en\": {\"language\": \"en\", \"value\": self.label}}}\n if len(self.description) > 0:\n data[\"descriptions\"] = {\"en\": {\"language\": \"en\", \"value\": self.description}}\n data[\"claims\"] = self.claims\n try:\n self.ID = self.wb_connection.create_entity(\"item\", data)\n print(self.ID)\n return self.ID\n except WBAPIException as e:\n log.error(f\"Item could not be created through the WB API: {str(e)}\")",
"def create_item(cls, item):\n\n connection = sqlite3.connect(\"data.db\")\n cursor = connection.cursor()\n\n cursor.execute(\"INSERT INTO items VALUES (?, ?)\", (item[\"name\"], item[\"price\"]))\n\n connection.commit()\n connection.close()",
"def purchase_list(name_of_item):\n purchased_items.append(name_of_item.lower())\n \n print(f\"{name_of_item.title()} has been added to the list of purchesed items.\")",
"def test_api_course_wish_create_existing(self):\n course = factories.CourseFactory()\n user = factories.UserFactory()\n token = self.get_user_token(user.username)\n models.CourseWish.objects.create(course=course, owner=user)\n\n response = self.client.post(\n f\"/api/v1.0/courses/{course.id}/wish/\",\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\"status\": True})",
"def create_test_food_item(self):\n food_item = FoodItems.objects.create(\n name=\"food item 1\",\n restaurant=self.restaurant,\n category=self.category1,\n price=600\n )\n food_item.tags.add(self.tag1)\n food_item = FoodItems.objects.create(\n name=\"food item 2\",\n restaurant=self.restaurant,\n category=self.category2,\n price=1000\n )\n food_item.tags.add(self.tag2)",
"def user_add_book_to_wishlist_by_id(self, book_id, user_num):\n c = self.db.cursor()\n # Get the wishlist ID\n c.execute(\"\"\"SELECT id FROM Wishlists WHERE userId = ?;\"\"\", (user_num,))\n wishlist_id = c.fetchone()['id']\n\n # If the book is already in the wishlist, don't add it\n c.execute(\"\"\"SELECT * FROM WishlistsBooks WHERE wishlistId = ? AND bookId = ?;\"\"\", (wishlist_id, book_id))\n if c.fetchall():\n flash(\"Book already in your wishlist\", \"warning\")\n log.warning(f\"Book {book_id} already in \" +\n f\"user {user_num}'s wishlist\")\n # otherwise, add book to the wishlist\n else:\n c.execute(\"\"\"INSERT INTO WishlistsBooks (wishlistId, bookId) VALUES (?, ?);\"\"\", (wishlist_id, book_id))\n self.db.commit()\n flash(\"Book successfully added to your wishlist\", \"success\")\n log.info(f\"Book {book_id} added to wishlist {wishlist_id}\")",
"def insert_product_wishlist(wishlist_id: UUID, product_id: str):\n wishlist = insert_product_wishlist_service.run(\n wishlist_id=wishlist_id,\n product_id=product_id,\n )\n wishlist_dict = model_to_dict(wishlist)\n products_dict = [asdict(product) for product in wishlist.products]\n wishlist_dict['products'] = products_dict\n\n return wishlist_dict",
"def add_apple():\n items['apple'] = {'description': 'yummy fruit',\n 'health': '+3'\n }",
"def add_item(item):\r\n item_list.append(item)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Competition between the given two competitors. Their ranks are updated as a result of the competition. If a winner_id is not specified, or an id that does not belong to one of the competitors is specified, the winner will be determined semirandomly: chosen at random, but weighted towards the competitor with the higher rank. Competitors are represented as dicts that include an id and a rank and no other fields. If a competitor does not have an id or rank, an error will be thrown. The minimum uncertainty, and the amount that uncertainty is decreased by, can also be specified for updating the uncertainties of each competitor.
|
def compete(comp1, comp2, winner_id=None, decr_uncertainty=0.002,
min_uncertainty=0.05):
# Check that both competitors are valid.
try:
isValidCompetitor(comp1)
isValidCompetitor(comp2)
except:
raise TypeError("Invalid competitor")
# If a winner_id isn't specified, one is semi-randomly determined.
if not winner_id or not winner_id in [comp1["id"], comp2["id"]]:
favored = dict()
unfavored = dict()
if comp1["rank"] > comp2["rank"]:
favored = comp1
unfavored = comp2
else:
favored = comp2
unfavored = comp1
if random.random() >= (favored["rank"] / (favored["rank"] +
unfavored["rank"])):
winner_id = unfavored["id"]
else:
winner_id = favored["id"]
# Assign the winner and loser
winner = dict()
loser = dict()
if winner_id == comp1["id"]:
winner = comp1
loser = comp2
else:
winner = comp2
loser = comp1
# Update ranks and uncertainties
winner, loser = updateRanks(winner, loser, decr_uncertainty=decr_uncertainty,
min_uncertainty=min_uncertainty)
if winner["id"] == comp1["id"]:
return winner, loser, winner_id
return loser, winner, winner_id
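# Minimal usage sketch (not part of the retrieved snippet); assumes isValidCompetitor()
# and updateRanks() from the companion snippets, plus `import random`, are available in
# the same module. compete() mutates and returns the competitor dicts.
alice = {"id": "alice", "rank": 1500, "uncertainty": 0.15}
bob = {"id": "bob", "rank": 1200, "uncertainty": 0.15}
alice, bob, winner_id = compete(alice, bob)  # winner chosen semirandomly, weighted by rank
print(winner_id, round(alice["rank"], 2), round(bob["rank"], 2))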
|
[
"def updateRanks(winner, loser, decr_uncertainty=0.002, min_uncertainty=0.05):\n # Check that both competitors are valid.\n try:\n isValidCompetitor(winner)\n isValidCompetitor(loser)\n except:\n raise TypeError(\"Invalid competitor\")\n\n # Determine the favored competitor.\n favored = None\n favored_rank = 0\n unfavored_rank = 0\n if winner[\"rank\"] > loser[\"rank\"]:\n favored = winner[\"id\"]\n favored_rank = winner[\"rank\"]\n unfavored_rank = loser[\"rank\"]\n else:\n favored = loser[\"id\"]\n favored_rank = loser[\"rank\"]\n unfavored_rank = winner[\"rank\"]\n\n # Update winner's rank\n uncertainty = 0.10 # Default uncertainty value.\n if \"uncertainty\" in winner:\n uncertainty = winner[\"uncertainty\"]\n if uncertainty > (min_uncertainty + decr_uncertainty):\n winner[\"uncertainty\"] = uncertainty - decr_uncertainty\n else:\n winner[\"uncertainty\"] = min_uncertainty\n if favored == winner[\"id\"]:\n winner[\"rank\"] = favored_rank + (uncertainty * unfavored_rank)\n else:\n winner[\"rank\"] = unfavored_rank + (uncertainty * favored_rank)\n\n # Update loser's rank\n uncertainty = 0.10 # Default uncertainty value.\n if \"uncertainty\" in loser:\n uncertainty = loser[\"uncertainty\"]\n if uncertainty > (min_uncertainty + decr_uncertainty):\n loser[\"uncertainty\"] = uncertainty - decr_uncertainty\n else:\n loser[\"uncertainty\"] = min_uncertainty\n if favored == loser[\"id\"]:\n loser[\"rank\"] = favored_rank - (uncertainty * favored_rank)\n else:\n loser[\"rank\"] = unfavored_rank - (uncertainty * unfavored_rank)\n\n return winner, loser",
"def rankTest():\n # Set default rank and uncertainty.\n base = 1000\n rank = 500\n uncertainty = 0.15\n # Test Competitors.\n competitors = list()\n for i in range(1, 9):\n comp = {\"id\": \"Player_{0}\".format(i), \"rank\": (base + (rank * i)),\n \"uncertainty\": uncertainty}\n competitors.append(comp)\n\n print \"\\n\\n*****INITIAL*****\\n\\n\"\n for comp in competitors:\n print \"ID: {0}, Rank: {1}, Uncertainty: {2}\".format(comp[\"id\"],\n comp[\"rank\"], comp[\"uncertainty\"])\n\n # Loop and print the results of each iteration.\n for i in range(0, 10):\n compA = dict()\n compB = dict()\n while True:\n compA = choice(competitors)\n compB = choice(competitors)\n if not compA[\"id\"] == compB[\"id\"]:\n break\n# print \"\\nBEFORE {0}: {5}: {1}, {2}; {6}: {3}, {4}\".format(i + 1,\n# compA[\"rank\"], compA[\"uncertainty\"], compB[\"rank\"],\n# compB[\"uncertainty\"], compA[\"id\"], compB[\"id\"])\n compA, compB, winner_id = compete(compA, compB, None)\n# print \"AFTER {3}: Winner: {0}, A: {1}; {4}, B: {2}; {5}\".format(winner_id,\n# compA[\"rank\"], compB[\"rank\"], i + 1, compA[\"uncertainty\"],\n# compB[\"uncertainty\"])\n\n print \"\\n\\n*****RESULTS*****\\n\\n\"\n for comp in competitors:\n print \"ID: {0}, Rank: {1}, Uncertainty: {2}\".format(comp[\"id\"],\n comp[\"rank\"], comp[\"uncertainty\"])",
"def set_winner(self, competitor):\n if competitor == self.__left_participant.get_competitor():\n self.__winner.set_competitor(competitor)\n self.__loser.set_competitor(self.__right_participant.get_competitor())\n elif competitor == self.__right_participant.get_competitor():\n self.__winner.set_competitor(competitor)\n self.__loser.set_competitor(self.__left_participant.get_competitor())\n else:\n raise Exception(\"Invalid competitor\")",
"def start_competition(self, competitor_names, randomise=True):\n if randomise:\n random.shuffle(competitor_names)\n \n competitors = pd.DataFrame(competitor_names, columns=['name'])\n competitors['level'] = 0\n competitors['active'] = False\n n_competitors = len(competitors.index)\n assert n_competitors > 1, \"Error: competition initialised with less than two competitors\"\n\n losers = pd.DataFrame(columns=competitors.columns)\n\n #setup db tables\n self.db['competitors'] = competitors\n self.db['losers'] = losers\n if 'match_records' not in self.db.keys():\n self.db['match_records'] = pd.DataFrame(columns= ['winner', 'loser', 'timestamp'])",
"def get_competitor(self, name: str, secret: str, exec_channel: IExecutionChannel) -> Optional[ICompetitor]:\n if name in self.competitors or name not in self.config[\"Traders\"] or self.config[\"Traders\"][name] != secret:\n return None\n\n instrument = self.config[\"Instrument\"]\n limits = self.config[\"Limits\"]\n\n account = CompetitorAccount(instrument[\"TickSize\"], instrument[\"EtfClamp\"])\n competitor = Competitor(name, self, exec_channel, self.future_book, self.etf_book, account, self.match_events,\n limits[\"PositionLimit\"], limits[\"ActiveOrderCountLimit\"], limits[\"ActiveVolumeLimit\"],\n instrument[\"TickSize\"])\n self.competitors[name] = competitor\n\n self.logger.info(\"'%s' is ready!\", name)\n\n if self.start_time != 0.0:\n self.logger.warning(\"competitor logged in after market open: name='%s'\", name)\n competitor.set_start_time(self.start_time)\n\n return competitor",
"def updateRaceState(self, timestamp):\n increases = {}\n for c in self.competitors:\n dynamicDistractions = self.dynamicDistractions(c)\n increase = c.consistency * c.responsiveness * dynamicDistractions * (c.alignment * random.randint(c.speed[0], c.speed[1]))\n increases[c.id] = increase\n\n winners = []\n for c in self.competitors:\n if c in self.injuredCompetitors or c.id in self.finished: continue\n cappedDist = self.calcInterference(c, increases)\n if cappedDist == -1: c.distance = min(self.race_attributes.length, c.distance + increases[c.id])\n else: c.distance = min(self.race_attributes.length, min(cappedDist, c.distance + increases[c.id]))\n # check if moved into next stage of race\n if c.distance >= self.race_attributes.length:\n if self.winner == None:\n winners.append(c.id)\n #self.winner = c.id\n self.winningTimestep = timestamp\n if c.id not in self.finished: self.finished.append(c.id)\n\n if len(winners) > 0:\n r = random.randint(0, len(winners)-1)\n self.winner = winners[r]\n\n # update competitor attributes\n self.updateEnergy(increases)\n self.updateResponsiveness()\n\n self.time_lapsed = time.time() - self.time_lapsed",
"def updateResponsiveness(self):\n\n def injury(self):\n # 1 / 200 chance of 'break down'\n # if random.randint(1, 5000) == 666: return True\n # else: return False\n return False\n\n\n def runningStyleImpact(self, c):\n sortedComps = sorted(self.competitors, key = operator.attrgetter('distance'))\n topThird = []\n middleThird = []\n bottomThird = []\n topRange = (int((NUM_OF_COMPETITORS / 3) * 2), NUM_OF_COMPETITORS)\n middleRange = (int((NUM_OF_COMPETITORS / 3)), int((NUM_OF_COMPETITORS / 3) * 2))\n bottomRange = (0, int((NUM_OF_COMPETITORS / 3)))\n for i in range(bottomRange[0], bottomRange[1]): bottomThird.append(sortedComps[i])\n for i in range(middleRange[0], middleRange[1]): middleThird.append(sortedComps[i])\n for i in range(topRange[0], topRange[1]): topThird.append(sortedComps[i])\n if self.raceSplit['start'][0] <= c.distance <= self.raceSplit['start'][1]:\n if c in topThird:\n if c.running_style == \"frontrunner\" and c.id not in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness * random.gauss(1.2, 0.05)\n self.runningStyleImpactChanged.append(c.id)\n if self.raceSplit['middle'][0] <= c.distance <= self.raceSplit['middle'][1]:\n if c in middleThird:\n if c.running_style == \"stalker\" and c.id not in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness * random.gauss(1.2, 0.05)\n self.runningStyleImpactChanged.append(c.id)\n if c.running_style == \"frontrunner\" and c.id in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness / random.gauss(1.2, 0.05)\n self.runningStyleImpactChanged.remove(c.id)\n if self.raceSplit['end'][0] <= c.distance <= self.raceSplit['end'][1]:\n if c in bottomThird or c in middleThird:\n if c.running_style == \"closer\" and c.id not in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness * random.gauss(1.1, 0.05)\n self.runningStyleImpactChanged.append(c.id)\n if c.running_style == \"stalker\" and c.id in self.runningStyleImpactChanged:\n c.responsiveness = c.responsiveness / random.gauss(1.1, 0.05)\n self.runningStyleImpactChanged.remove(c.id)\n\n\n def finalStretch(self, c):\n if c.id in self.finished: return\n if c.distance >= self.race_attributes.length - self.finalStretchDist[self.race_attributes.race_type] and c.id not in self.finalStretchIncreases:\n # in final stretch\n distanceLeft = float(self.race_attributes.length - c.distance)\n energyLeft = int(c.energy / distanceLeft)\n buildUp = energyLeft / distanceLeft\n # multiply buildUp by 2 for more dramatic race events\n self.finalStretchIncreases[c.id] = buildUp * 3\n\n if c.id in self.finalStretchIncreases:\n c.responsiveness = c.responsiveness + self.finalStretchIncreases[c.id]\n\n # if race is long then competitors should have lower responsiveness at start and middle with burst at end\n # if race is short then competitors should have resonably consistent responsiveness throughout\n for c in self.competitors:\n if c in self.injuredCompetitors or c.id in self.finished:\n continue\n if injury(self) == True:\n c.responsiveness = 0\n self.injuredCompetitors.append(c)\n runningStyleImpact(self, c)\n finalStretch(self, c)",
"def c_for_benefit_score(pairs):\n \n # mapping pair outcomes to benefit\n obs_benefit_dict = {\n (0, 0): 0,\n (0, 1): -1,\n (1, 0): 1,\n (1, 1): 0,\n }\n \n # compute observed benefit for each pair\n obs_benefit = [obs_benefit_dict[(i[1], j[1])] for (i, j) in pairs]\n\n # compute average predicted benefit for each pair\n pred_benefit = [np.mean([i[0], j[0]]) for (i, j) in pairs]\n\n concordant_count, permissible_count, risk_tie_count = 0, 0, 0\n\n # iterate over pairs of pairs\n for i in range(len(pairs)):\n for j in range(i + 1, len(pairs)):\n \n # if the observed benefit is different, increment permissible count\n if obs_benefit[i] != obs_benefit[j]:\n\n # increment count of permissible pairs\n permissible_count = permissible_count +1\n \n # if concordant, increment count\n \n if ((pred_benefit[i] > pred_benefit[j] and obs_benefit[i] > obs_benefit[j]) or (pred_benefit[i] < pred_benefit[j] and obs_benefit[i] < obs_benefit[j])): # change to check for concordance\n \n concordant_count = concordant_count + 1\n\n # if risk tie, increment count\n if (pred_benefit[i] == pred_benefit[j]): #change to check for risk ties\n risk_tie_count = risk_tie_count + 1\n\n\n # compute c-statistic-for-benefit\n cstat = (concordant_count + (0.5 * risk_tie_count)) / permissible_count\n\n return cstat",
"def winning_probability(potential_winners: list, candidate: Candidate):\n raise NotImplementedError",
"def coombs_with_data(profile):\n \n num_cands = profile.num_cands \n candidates = profile.candidates\n strict_maj_size = profile.strict_maj_size()\n \n rs, rcounts = profile.rankings_counts # get all the ranking data\n \n cands_to_ignore = np.empty(0)\n\n winners = [c for c in candidates \n if _num_rank_first(rs, rcounts, cands_to_ignore, c) >= strict_maj_size]\n\n elims_list = list()\n while len(winners) == 0:\n \n last_place_scores = {c: _num_rank_last(rs, rcounts, cands_to_ignore, c) for c in candidates \n if not isin(cands_to_ignore,c)} \n max_last_place_score = max(last_place_scores.values())\n greatest_last_place_votes = np.array([c for c in last_place_scores.keys() \n if last_place_scores[c] == max_last_place_score])\n\n elims_list.append(list(greatest_last_place_votes))\n # remove candidates ranked last by the greatest number of voters\n cands_to_ignore = np.concatenate((cands_to_ignore, greatest_last_place_votes), axis=None)\n \n if len(cands_to_ignore) == num_cands:\n winners = list(greatest_last_place_votes)\n else:\n winners = [c for c in candidates \n if not isin(cands_to_ignore,c) and _num_rank_first(rs, rcounts, cands_to_ignore, c) >= strict_maj_size]\n\n return sorted(winners), elims_list",
"def coombs(profile):\n \n num_cands = profile.num_cands \n candidates = profile.candidates\n strict_maj_size = profile.strict_maj_size()\n \n rs, rcounts = profile.rankings_counts # get all the ranking data\n \n cands_to_ignore = np.empty(0)\n\n winners = [c for c in candidates \n if _num_rank_first(rs, rcounts, cands_to_ignore, c) >= strict_maj_size]\n\n while len(winners) == 0:\n \n last_place_scores = {c: _num_rank_last(rs, rcounts, cands_to_ignore, c) for c in candidates \n if not isin(cands_to_ignore,c)} \n max_last_place_score = max(last_place_scores.values())\n greatest_last_place_votes = np.array([c for c in last_place_scores.keys() \n if last_place_scores[c] == max_last_place_score])\n\n # remove candidates ranked last by the greatest number of voters\n cands_to_ignore = np.concatenate((cands_to_ignore, greatest_last_place_votes), axis=None)\n \n if len(cands_to_ignore) == num_cands:\n winners = list(greatest_last_place_votes)\n else:\n winners = [c for c in candidates \n if not isin(cands_to_ignore,c) and _num_rank_first(rs, rcounts, cands_to_ignore, c) >= strict_maj_size]\n\n return sorted(winners)",
"def computerLogic(self, opponent):\n # Initialize Largest Value of most played choice\n largest_value = 0\n\n # Initialize Top Choices to choose from dictionary for computer\n topChoice = {}\n\n for choice, numChoicePlayed in opponent._history.items():\n if numChoicePlayed > largest_value:\n # Create new topChoice dict\n topChoice = {choice: numChoicePlayed}\n\n # Update Largest Value Choice\n largest_value = numChoicePlayed\n\n elif numChoicePlayed == largest_value:\n topChoice[choice] = numChoicePlayed\n\n opponentChoice = random.choice(list(topChoice.keys()))\n computerChoice = self.__optimalChoice__(opponentChoice)\n\n return computerChoice",
"def createCompetitors(self, numOfCompetitors):\n\n # IMPROVEMENT: ONLY ADD COMPETITOR TO POOL IF SUFFICIENTLY CLOSELY\n # ALIGNED TO RACE CONDIITIONS, ie. create competitor, check alignment,\n # if less than a sys constant variable then don't append- continue loop\n\n print('CREATING COMPS')\n\n comps = []\n for i in range(numOfCompetitors):\n found = False\n while(found == False):\n c = Competitor(i, self.race_attributes)\n if c.alignment >= 0.95:\n found = True\n comps.append(c)\n if c.alignment<0:\n print('NEGATIVE')\n\n return comps",
"def registerCompetitor(tournament_id, competitor_id):\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n\n dbcursor.execute(\"\"\"INSERT INTO competitors (tournament_id, competitor_id,\n competitor_bye)\n VALUES (%s, %s, %s);\"\"\",\n (tournament_id, competitor_id, False,))\n\n dbconnection.commit()\n dbconnection.close()",
"def define_critic_joint(self, joint_name):\n self.picked_actions[joint_name] = tf.placeholder(shape=(None,), dtype=tf.int32, name=\"picked_action_{}\".format(joint_name))\n imputs = []\n _2_frames = self.turn_2_frames_vergence_on and joint_name == \"vergence\"\n if _2_frames:\n inputs = [tf.stop_gradient(self.scale_latent_conv_2_frames[ratio]) for ratio in self.ratios]\n else:\n inputs = [tf.stop_gradient(self.scale_latent_conv_4_frames[ratio]) for ratio in self.ratios]\n flats = []\n for inp in inputs:\n conv1 = tl.conv2d(inp, 32 if _2_frames else 64, 2, 1, \"valid\", activation_fn=lrelu)\n pool1 = tl.max_pool2d(conv1, 2, 2, \"valid\")\n size = np.prod(pool1.get_shape()[1:])\n flats.append(tf.reshape(pool1, (-1, size)))\n flat = tf.concat(flats, axis=-1)\n fc1 = tl.fully_connected(flat, 200, activation_fn=lrelu)\n critic_values = tl.fully_connected(fc1, self.n_actions_per_joint, activation_fn=None)\n self.critic_values[joint_name] = critic_values\n self.returns[joint_name] = tf.placeholder(shape=critic_values.get_shape()[:1], dtype=tf.float32, name=\"return_{}\".format(joint_name))\n actions = self.picked_actions[joint_name]\n indices = tf.stack([tf.range(tf.shape(actions)[0]), actions], axis=1)\n self.critic_values_picked_actions[joint_name] = tf.gather_nd(critic_values, indices)\n self.critic_loss[joint_name] = tf.losses.huber_loss(\n self.returns[joint_name] * self.reward_scaling_factor,\n self.critic_values_picked_actions[joint_name],\n delta=0.5\n )\n ### ACTIONS:\n self.greedy_actions_indices[joint_name] = tf.cast(tf.argmax(self.critic_values[joint_name], axis=1), dtype=tf.int32)\n shape = tf.shape(self.greedy_actions_indices[joint_name])\n condition = tf.greater(tf.random_uniform(shape=shape), self.epsilon)\n random = tf.random_uniform(shape=shape, maxval=self.n_actions_per_joint, dtype=tf.int32)\n self.sampled_actions_indices[joint_name] = tf.where(condition, x=self.greedy_actions_indices[joint_name], y=random)\n # summaries\n summaries = []\n mean_abs_return = tf.reduce_mean(tf.abs(self.returns[joint_name] * self.reward_scaling_factor))\n summaries.append(tf.summary.scalar(\"/joint/{}/mean_abs_return\".format(joint_name), mean_abs_return))\n mean, var = tf.nn.moments(tf.abs(self.critic_values_picked_actions[joint_name] - self.returns[joint_name] * self.reward_scaling_factor), axes=[0])\n summaries.append(tf.summary.scalar(\"/joint/{}/mean_abs_distance\".format(joint_name), mean))\n summaries.append(tf.summary.scalar(\"/joint/{}/std_abs_distance\".format(joint_name), tf.sqrt(var)))\n summaries.append(tf.summary.histogram(\"return_{}\".format(joint_name), self.returns[joint_name]))\n self.joint_summary[joint_name] = tf.summary.merge(summaries)",
"def ranking():\n \n # Make sure my-team is set. Error if not.\n if session.get('my-team') in [None, '@@']:\n return {'success': 0, 'errors': \"You haven't selected a team as YOUR team.\", \"data\": list()}, 400\n \n # Get the raw scores for all non-excluded teams.\n raw_data = [db.row_to_dict(r) for r in db.query_db(\"select * from raw_scores left join teams on raw_scores.teamId = teams.teamId where teams.exclude=0\")]\n \n # Split into rounds\n rounds = clu.split_into_rounds(raw_data, round_col='round')\n \n # Calculate scores\n raw_scores = list()\n \n point_vals = {\n 'autonomous': session.get('params.autonomous-points', 1), \n 'climb': session.get('params.climb-points', 1),\n 'spin_by_colour': session.get('params.spin-by-col-points', 1),\n 'spin_by_rotate': session.get('params.spin-by-rot-points', 1)\n }\n \n missing_my_team = set()\n for rnd in rounds.values():\n try:\n rnd_scores = clu.calc_scores_for_round(rnd, \n us_id=session['my-team'], \n id_col='teamId',\n point_values=point_vals, \n zero_balls=session.get('params.zero-balls', 0),\n balls_low_col='low_balls', \n balls_high_col='high_balls',\n auto_col='autonomous', \n climb_col='climb', \n spin_clr_col='spin_by_colour',\n spin_rot_col='spin_by_rotate', \n round_col='round')\n raw_scores.extend(rnd_scores)\n except ValueError: # My team not in this round -- ignore for now.\n missing_my_team.add(rnd[0]['round'])\n\n # Aggregate scores\n ag_scores = pd.DataFrame(clu.aggregate_scores(raw_scores), columns=['pair', 'score', 'std_dev', 'adj_score'])\n total = len(ag_scores)\n\n rqv = request.values # saves me some typing.\n \n # Ordering\n if 'order[0][column]' in rqv:\n col = rqv['order[0][column]']\n col_name = rqv['columns[{}][name]'.format(col)]\n asc = rqv['order[0][dir]'] not in ['dsc', 'des', 'desc']\n ag_scores = ag_scores.sort_values(by=[col_name], ascending=asc)\n \n # Filter ...\n if ('search[value]' in rqv) and rqv['search[value]'].strip():\n sv = rqv['search[value]'].strip()\n ag_scores = ag_scores[[sv in str(x) for x in ag_scores['pair'].to_list()]]\n\n filtered = len(ag_scores)\n \n # Any searching / filtering?\n if 'start' in rqv:\n ag_scores = ag_scores[int(rqv['start']):]\n if 'length' in rqv:\n ag_scores = ag_scores[:int(rqv['length'])]\n \n return {'success': 1,\n 'warning': \"My Team not set in round(s): {}\".format(missing_my_team) if missing_my_team else None,\n 'data': ag_scores.to_dict(orient='records'),\n 'rounds': len(rounds),\n \"recordsTotal\": total,\n \"recordsFiltered\": filtered,\n }, 200",
"def isValidCompetitor(comp):\n if not isinstance(comp, dict):\n raise TypeError(\"Invalid object; competitor must be a dict\")\n if not \"id\" in comp:\n raise KeyError(\"Invalid competitor; competitor must include an id\")\n if not \"rank\" in comp:\n raise KeyError(\"Invalid competitor; competitor must include a rank\")\n if not comp[\"id\"]:\n raise LookupError(\"Invalid competitor; competitor must include an id\")\n if not comp[\"rank\"]:\n raise LookupError(\"Invalid competitor; competitor must include a rank\")\n if not comp[\"rank\"] > 0:\n raise LookupError(\"Invalid rank; rank must be greater than 0\")\n\n return True",
"def retrieve_player_stats(player1,player2,date,r,sur,year):\n\t#COMMON OPPONENTS APPROACH\n\t#print(\"Retrieving data about {} with respect to {} for matches before {}...\".format(player1,player2,date))\n\t\n\t#TIME DISCOUNTING\n\t#we try to give higher weight to most recent matches\n\t#to do so, we select the rows of interest AND the difference (in years) from the present date which will serve as weight\n\n\t####\n\t#games played by player1 in the most recent 5 years\n\tg1=df[((df[\"winner_name\"]==player1) | (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g1.loc[(g1.winner_name==player1, 'loser_name')].values[:])\n\tol=list(g1.loc[(g1.loser_name==player1, 'winner_name') ].values[:])\n\to1=set(ow+ol) #player 1 opponents\n\n\t#games played by player2\n\tg2=df[((df[\"winner_name\"]==player2) | (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g2.loc[(df.winner_name==player2, 'loser_name')].values[:])\n\tol=list(g2.loc[(df.loser_name==player2, 'winner_name') ].values[:])\n\to2=set(ow+ol) #player 2 opponents\n\n\t#list of common opponents \n\tco=[x for x in o1 if x in o2]\n\t#print(\"Common opponents in the last 5 years:\")\n\t#print(co)\n\n\tcolumn_names=[\"fs\",\"w1sp\",\"w2sp\",\"wsp\",\"wrp\",\"tpw\",\"aces\",\"df\",\"bpc\",\"bps\",\"bpo\",\"bpw\",\"tmw\",\"data_amount\",\"opponent\",]\n\taverages=pd.DataFrame(columns=column_names) #df to be filled with one row per opponent\n\t\n\tif len(co)>=5:\n\t\t\n\t\tcount=0\n\t\t#now evaluate average statistics of player1 wrt to each common opponent, then we'll do the average\n\t\tfor o in co:\n\t\t\t#print(\"Matches of {} vs {}...\".format(player1,o))\n\t\t\ttot_w=0\n\t\t\ttot_l=0\n\n\t\t\t#select matches of player 1 vs opponent o\n\t\t\tm=df[((((df[\"winner_name\"]==player1) & (df[\"loser_name\"]==o))) | ((df[\"winner_name\"]==o) & (df[\"loser_name\"]==player1))) & \\\n\t\t\t((df[\"tourney_date\"]<date) | ((df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\t\tif m.shape[0] > 0:\n\t\t\t\t#we have min 2 past matches against opponent o\n\t\t\t\t#won matches\n\t\t\t\tw=m[m[\"winner_name\"]==player1].loc[:,['w_fs', 'w_w1s', 'w_w2s', 'w_wsp', 'w_wrp', 'w_tpw', 'w_apg', 'w_dfpg', 'w_bppg', 'w_bps', 'l_bppg', 'l_bps', 'loser_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'w_fs':'fs','w_w1s':'w1s','w_w2s':'w2s','w_wsp':'wsp','w_wrp':'wrp','w_tpw':'tpw','w_apg':'apg','w_dfpg':'dfpg','w_bppg':'bppg',\\\n\t\t\t\t'w_bps':'bps','l_bppg':'bpo','l_bps':'l_bps','loser_name':'opponent', 'tourney_date':'date','surface':'s'})\n\t\t\t\tif w.shape[0]>0:\n\t\t\t\t\tw[\"bpc\"]=w.apply(lambda row: 1-row[\"l_bps\"],axis=1)\n\t\t\t\t\t#set year difference param.\n\t\t\t\t\tw[\"year_diff\"]=w.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_w=w.shape[0]\n\t\t\t\tw=w.drop(\"l_bps\", axis=1)\n\n\t\t\t\t#lost matches\n\t\t\t\tl=m[m[\"loser_name\"]==player1].loc[:,['l_fs', 'l_w1s', 'l_w2s', 'l_wsp', 'l_wrp', 'l_tpw', 'l_apg', 'l_dfpg', 'l_bppg', 'l_bps', 'w_bppg', 'w_bps', 
'winner_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'l_fs':'fs','l_w1s':'w1s','l_w2s':'w2s','l_wsp':'wsp','l_wrp':'wrp','l_tpw':'tpw','l_apg':'apg','l_dfpg':'dfpg','l_bppg':'bppg',\\\n\t\t\t\t'l_bps':'bps','w_bppg':'bpo','w_bps':'w_bps','winner_name':'opponent','tourney_date':'date','surface':'s'})\n\t\t\t\tif l.shape[0]>0:\n\t\t\t\t\tl[\"bpc\"]=l.apply(lambda row: 1-row[\"w_bps\"],axis=1)\n\t\t\t\t\t\n\t\t\t\t\tl[\"year_diff\"]=l.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_l=l.shape[0]\n\t\t\t\t\t\n\t\t\t\tl=l.drop(\"w_bps\", axis=1)\n\n\t\t\t\t#join the two datframes, so that we have all the matches\n\t\t\t\tj = pd.concat([w, l],sort=False)\n\t\t\t\t#weight for surface\n\t\t\t\tj[\"s_ref\"]=j.apply(lambda row: sur,axis=1) #reference surface of match under study\n\t\t\t\tj[\"s_w\"]=j.apply(surface_weighting,axis=1) #surface weight of each previous match\n\t\t\t\tj=j.drop(\"s\", axis=1) #not useful anymore\n\n\t\t\t\t#assign weight which decreases as year_diff is higher\n\t\t\t\tj[\"discounting\"]=j.apply(time_discount,axis=1)\n\t\t\t\t#further multiply time weights by surface weights\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]*row[\"s_w\"],axis=1)\n\t\t\t\tj=j.drop(\"s_ref\", axis=1)\n\t\t\t\tj=j.drop(\"s_w\", axis=1)\n\t\t\t\tj=j.drop(\"year_diff\", axis=1)\n\n\t\t\t\t#print(j)\n\t\t\t\ttot_weights=j[\"discounting\"].sum()\n\t\t\t\t#normalize weights to sum to 1\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]/j[\"discounting\"].sum(),axis=1)\n\t\t\t\t#print(j)\n\t\t\t\t#weight all the matches for the discounting param\n\t\t\t\t#hence, multiply columns 0-11 for column \"discounting\"\n\t\t\t\tj.update(j.iloc[:, 0:11].mul(j.discounting, 0))\n\t\t\t\tj[\"bpc\"]=j.apply(lambda row: row[\"bpc\"]*row[\"discounting\"],axis=1)\n\t\t\t\t#now to have the weghted average of each stat, sum all the column\n\t\t\t\tavg=list(j.sum(axis=0,numeric_only=True)[0:12])\n\t\t\t\tavg.append(tot_w/(tot_w+tot_l)) #append % of matches won against o\n\t\t\t\t#UNCERTAINTY\n\t\t\t\t#print(\"Uncertainty: 1/{}\".format(tot_weights))\n\t\t\t\tavg.append(tot_weights) #add \"data amount\" CHANGED FROM BEFORE!!\n\t\t\t\tavg.append(o)\n\t \t\t\n\t \t\t#NOW we have data for past matches of player1 against common opponent o\n\t\t\t\t#add to dataframe, go to next one\n\t\t\t\taverages.loc[count]=avg\n\t\t\t\tcount+=1\n\n\t\t\t\t#print(j)\n\t\t\t\n\t\t\t\n\t#at the end of the loop, return the dataframe\n\t#in the outer function, compute general uncertainties with data of the two players combined, \n\t#then evaluate average statistics btw all the common opponents for each player - finally, build the ultimate feature vector\n\t#print(averages)\n\treturn averages",
"def update_user_ranking(winner_id,movie_ranks,user_id): \n #winning movie rank increases by 1\n movie_ranks[winner_id] += 1\n #update sql database with new rankings\n cnx = mysql.connector.connect(user='root', password='Tnci12!UHbs94',\n database = 'moviematchupdb', host='localhost')\n winner = cnx.cursor()\n winner_movie = (\"\"\"UPDATE user_rankings\n SET movie_rank = %s\n WHERE user_id = %s and movie_id = %s;\"\"\")\n winner.execute(winner_movie,(movie_ranks[winner_id],user_id,winner_id))\n cnx.commit()\n winner.close()\n cnx.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks whether the given object is a valid competitor that includes an id and a rank. If the object is not valid, an error will be thrown.
|
def isValidCompetitor(comp):
if not isinstance(comp, dict):
raise TypeError("Invalid object; competitor must be a dict")
if not "id" in comp:
raise KeyError("Invalid competitor; competitor must include an id")
if not "rank" in comp:
raise KeyError("Invalid competitor; competitor must include a rank")
if not comp["id"]:
raise LookupError("Invalid competitor; competitor must include an id")
if not comp["rank"]:
raise LookupError("Invalid competitor; competitor must include a rank")
if not comp["rank"] > 0:
raise LookupError("Invalid rank; rank must be greater than 0")
return True
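
A minimal usage sketch, assuming isValidCompetitor is callable as defined above; the competitor dicts below are hypothetical examples used only for illustration:

# Hypothetical competitor with both required fields.
valid_comp = {"id": "Player_1", "rank": 1500}
assert isValidCompetitor(valid_comp)  # returns True

try:
    isValidCompetitor({"id": "Player_2"})  # missing "rank"
except KeyError as err:
    print(err)  # Invalid competitor; competitor must include a rank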
|
[
"def valid_object(obj):\n return cmds.objExists(obj)",
"def validate_object_id(object_id):\n\n rex = re.compile('^[A-Za-z0-9-]+$')\n if is_it_a_string(object_id):\n if not rex.match(object_id):\n msg = \"Object ID failed validation: {}\".format(object_id)\n raise CloudPassageValidation(msg)\n else:\n return True\n elif isinstance(object_id, list):\n for individual in object_id:\n if not rex.match(individual):\n msg = \"Object ID failed validation: {}\".format(object_id)\n raise CloudPassageValidation(msg)\n return True\n else:\n msg = \"Wrong type for object ID: {}\".format(type(object_id))\n raise TypeError(msg)",
"def is_valid(self, object_name):\n try:\n resp = self.get_object_id(object_name)\n return True\n except FacebookError as e:\n return False",
"def oidIsValid(oid):\r\n try:\r\n ObjectId(oid)\r\n return True\r\n except:\r\n return False",
"def validate_object(self, objectString, target, isStatus = False):\n # example 5 > 2\n targetObject = target \n\n if objectString.isdigit():\n return True\n \n #list of valid terms other than digit i.e d20\n if re.search(self._regexForDice, objectString) is not None:\n return True\n\n #special case where the target is the point\n if target == \"point\":\n targetObject = self._entityTarget\n \n if isStatus != False:\n return self.is_valid_status(target, objectString)\n\n # entity.hp > 4\n if objectString.find(targetObject) != self._cannotFindSubstring and objectString.find(\".\") != self._cannotFindSubstring:\n dotSplitter = objectString.find(\".\")\n ourObject = objectString[:dotSplitter]\n objectAttribute = objectString[dotSplitter + 1:]\n if ourObject == \"target\" or ourObject == \"entity\" or ourObject == \"self\":\n return self.validate_generic_object_attribute(ourObject.strip(), objectAttribute.strip())\n else:\n return self.validate_object_attribute_or_action(ourObject.strip(), objectAttribute.strip())\n \n if objectString in self._variables_list:\n return True\n\n if objectString == \"target\" or objectString == \"self\" or objectString == \"entity\":\n return True\n\n return False",
"def test_valid_position(self, obj, pos, orn=None):\n\t\tis_robot = isinstance(obj, BaseRobot)\n\n\t\tself.set_pos_orn_with_z_offset(obj, pos, orn)\n\t\t#self.set_pos_orn(obj, pos, orn)\n\n\t\tif is_robot:\n\t\t\tobj.robot_specific_reset()\n\t\t\tobj.keep_still()\n\n\t\tbody_id = obj.robot_ids[0] if is_robot else obj.body_id\n\t\thas_collision = self.check_collision(body_id)\n\t\treturn has_collision",
"def check_contribution_object(case, classes, contributions):\n if case == \"regression\" and isinstance(contributions, (np.ndarray, pd.DataFrame)) == False:\n raise ValueError(\n \"\"\"\n Type of contributions parameter specified is not compatible with \n regression model.\n Please check model and contributions parameters. \n \"\"\"\n )\n elif case == \"classification\":\n if isinstance(contributions, list):\n if len(contributions) != len(classes):\n raise ValueError(\n \"\"\"\n Length of list of contributions parameter is not equal\n to the number of classes in the target.\n Please check model and contributions parameters.\n \"\"\"\n )\n else:\n raise ValueError(\n \"\"\"\n Type of contributions parameter specified is not compatible with \n classification model.\n Please check model and contributions parameters.\n \"\"\"\n )",
"def check_rank(shape, required_rank):\n return get_rank(shape) == required_rank",
"def check_type(obj, obj_type, message=None):\n if not isinstance(obj, obj_type):\n if message is None:\n message = \"Check failed. Object is of type %s, expected %s.\" % (str(type(obj)), str(obj_type))\n check_failed(message)",
"def _check_is_int(obj):\n return isinstance(obj, int)",
"def is_posn(obj):\n return (isinstance(obj, (list, tuple)) and\n len(obj) == 2 and\n isinstance(obj[0], int) and\n isinstance(obj[1], int))",
"def verify_ranked_choice_ballot(position):\n logging.info('Verifying ranked choice ballot.')\n election_position = models.RankedVotingPosition.get(position['id'])\n if not election_position:\n logging.info('No election position found in models.')\n return False\n assert election_position.position_type == 'Ranked-Choice'\n \n required = election_position.vote_required\n election_position_candidates = models.ElectionPositionCandidate.gql('WHERE election_position=:1 AND written_in=False',\n election_position)\n num_ranks_required = election_position_candidates.count()\n write_in_slots_allowed = election_position.write_in_slots\n write_in_slots_used = 0\n ranks = []\n candidates_verified = {}\n for election_position_candidate in election_position_candidates:\n candidates_verified[str(election_position_candidate.key())] = False\n for candidate_ranking in position['candidate_rankings']:\n if not candidate_ranking['rank']:\n if required: \n logging.info('Ranking required but not provided')\n return False # Ranking required but not provided\n else: return None # Empty ballot\n else:\n ranks.append(candidate_ranking['rank'])\n candidates_verified[candidate_ranking['id']] = True\n if candidate_ranking['id'].startswith('write-in'):\n if not write_in_slots_allowed:\n logging.info('Write-in not allowed.')\n return False # Write in not allowed\n elif candidate_ranking['rank']:\n num_ranks_required += 1\n write_in_slots_used += 1\n else:\n logging.info('Write in was specified but not ranked')\n return False # Write in was specified but not ranked\n \n for verified in candidates_verified.values():\n if not verified: \n logging.info('Not all candidates verified')\n return False # Not all candidates verified\n ranks.sort()\n logging.info(\"Verifying ranks.\")\n if len(ranks) == 0 and not required: return True\n if ranks[0] != 1 or ranks[len(ranks)-1] != num_ranks_required:\n logging.info(num_ranks_required)\n logging.info(ranks)\n logging.warning(\"Number of rankings don't match\")\n return False # Number of rankings don't match\n if write_in_slots_used > write_in_slots_allowed: \n logging.warning(\"More write-in slots used than allowed\")\n return False\n logging.info('Ballot for position %s verified.', election_position.position.name)\n return True",
"def _validate_orgid(ctx, orgid, value):\n try:\n click.echo(f\"Validating Organization ID {value}\")\n apikey = ctx.obj.get(\"apikey\")\n m = meraki.DashboardAPI(\n api_key=apikey, print_console=False, output_log=False, suppress_logging=True\n )\n networks = m.organizations.getOrganizationNetworks(value, total_pages=\"all\")\n return value\n except:\n m = meraki.DashboardAPI(\n api_key=apikey, print_console=False, output_log=False, suppress_logging=True\n )\n orgs = m.organizations.getOrganizations()\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"ORGANIZATION ID\")\n table.add_column(\"ORGANIZATION NAME\")\n for org in orgs:\n table.add_row(\n org.get(\"id\"),\n org.get(\"name\"),\n )\n console = Console()\n console.print(table)\n raise click.BadParameter(f\"Provide a valid Organization ID\\n\")",
"def isPlayerIDValid(playerID):\r\n playersColl = getPlayersColl()\r\n result = False\r\n if oidIsValid(playerID):\r\n pp = playersColl.find_one({'_id': playerID})\r\n result = (pp != None)\r\n return result",
"def _validate_int(obj):\n m = int(obj + 0) # May raise TypeError.\n if obj != m:\n raise ValueError('expected an integer but got %r' % obj)",
"def validate(obj, obj_type):\n # TODO: Deprecate or move. #283\n from plotly import graph_reference\n from plotly.graph_objs import graph_objs\n\n if obj_type not in graph_reference.CLASSES:\n obj_type = graph_reference.string_to_class_name(obj_type)\n\n try:\n cls = getattr(graph_objs, obj_type)\n except AttributeError:\n raise exceptions.PlotlyError(\n \"'{0}' is not a recognizable graph_obj.\".\n format(obj_type))\n cls(obj) # this will raise on invalid keys/items",
"def validate_id_or_name(self, obj_type, id_or_name):\n\n obj = getattr(self.endpoint, pluralize(obj_type))\n\n try:\n id = int(id_or_name)\n return obj[id].id\n\n except ValueError:\n # can't convert string to int, try lookup by name\n matches = [x[1] for x in obj.filter(\n \"name='%s'\" % id_or_name).items()]\n if len(matches) == 1:\n return matches[0].id\n elif len(matches) == 0:\n raise ValueError('No %s found for id or name %s' %\n (obj_type, id_or_name))\n elif len(matches) > 1:\n\n match_string = \"\\n\".join(map(str, matches))\n raise ValueError(\"Multiple %ss matched name %s, \"\n \"please specify an ID \"\n \"instead.\\n\\nMatches:\\n%s\" %\n (obj_type, id_or_name, match_string))\n\n except KeyError:\n #obj[id] lookup failed, so ID is an int but not a valid ID.\n raise ValueError('No %s found for ID %s' % (obj_type, id_or_name))",
"def rank(self, obj):\n if obj == self._object:\n return 0\n else:\n raise \"Not a correct object\"",
"async def verify_club_access(\n id_or_idclub: Union[int, str], idnumber: int, role: ClubRoleNature\n) -> bool:\n idnumber = int(idnumber)\n logger.debug(f\"verify {id_or_idclub} {idnumber} {role}\")\n if isinstance(id_or_idclub, str):\n try:\n club = await get_club(id_or_idclub)\n except RdNotFound:\n club = None\n else:\n club = await find_club(id_or_idclub)\n logger.debug(f\"club in verify {club.idclub}\")\n if club and club.clubroles:\n for r in club.clubroles:\n logger.debug(f\"r: {r.nature} {r.memberlist}\")\n if role == r.nature:\n if idnumber in r.memberlist:\n return True\n else:\n logger.debug(f\"member not in list {r.nature}\")\n raise RdForbidden"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the ranks of the winner and loser. The amount by which each competitor's rank changes depends on which competitor was favored. An optional uncertainty value on each competitor determines how much that competitor's rank is altered; if no uncertainty value is specified, a value of 10% (0.10) is used. Each uncertainty value is then decreased by the specified amount (0.2% (0.002) if not specified), down to a minimum uncertainty value of 5% (0.05) by default.
|
def updateRanks(winner, loser, decr_uncertainty=0.002, min_uncertainty=0.05):
# Check that both competitors are valid.
try:
isValidCompetitor(winner)
isValidCompetitor(loser)
except:
raise TypeError("Invalid competitor")
# Determine the favored competitor.
favored = None
favored_rank = 0
unfavored_rank = 0
if winner["rank"] > loser["rank"]:
favored = winner["id"]
favored_rank = winner["rank"]
unfavored_rank = loser["rank"]
else:
favored = loser["id"]
favored_rank = loser["rank"]
unfavored_rank = winner["rank"]
# Update winner's rank
uncertainty = 0.10 # Default uncertainty value.
if "uncertainty" in winner:
uncertainty = winner["uncertainty"]
if uncertainty > (min_uncertainty + decr_uncertainty):
winner["uncertainty"] = uncertainty - decr_uncertainty
else:
winner["uncertainty"] = min_uncertainty
if favored == winner["id"]:
winner["rank"] = favored_rank + (uncertainty * unfavored_rank)
else:
winner["rank"] = unfavored_rank + (uncertainty * favored_rank)
# Update loser's rank
uncertainty = 0.10 # Default uncertainty value.
if "uncertainty" in loser:
uncertainty = loser["uncertainty"]
if uncertainty > (min_uncertainty + decr_uncertainty):
loser["uncertainty"] = uncertainty - decr_uncertainty
else:
loser["uncertainty"] = min_uncertainty
if favored == loser["id"]:
loser["rank"] = favored_rank - (uncertainty * favored_rank)
else:
loser["rank"] = unfavored_rank - (uncertainty * unfavored_rank)
return winner, loser
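
A short worked example, assuming updateRanks is called as defined above; the competitors and ranks are hypothetical and rely on the default uncertainty of 0.10:

# Hypothetical competitors; the loser is favored (higher rank going in).
winner = {"id": "A", "rank": 1500}
loser = {"id": "B", "rank": 2000}

winner, loser = updateRanks(winner, loser)

# Favored rank is 2000, unfavored rank is 1500, default uncertainty 0.10:
#   winner rank -> 1500 + 0.10 * 2000 = 1700.0
#   loser rank  -> 2000 - 0.10 * 2000 = 1800.0
# Each competitor's uncertainty is reduced by 0.002 (to about 0.098).
print(winner["rank"], loser["rank"])  # 1700.0 1800.0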
|
[
"def update_ranking(self):\n self.ranking = self.calculate_ranking()\n self.last_ranked_date = datetime.datetime.now()\n # TODO: should this save?\n self.save()",
"def rankChange(rank1,rank2,PlayerOutcome):\n deltaWin = -48.0*(math.atan((rank1 - rank2)*1.0/1800.0))/math.pi + 24\n deltaLose = deltaWin - 48\n if (PlayerOutcome == 1): return round(deltaWin)\n elif (PlayerOutcome == -1): return round(deltaLose)\n else: return round((deltaWin+deltaLose)/2)",
"def update_rank(self, rank, balled=False):\n if balled:\n # get current maximum rank\n max = UserProfile.objects.filter(active=True).aggregate(max_rank=Max('rank'))\n self.rank = max['max_rank']\n\n # get all profiles above 'rank' (the losers rank) and move them up 1\n for profile in UserProfile.objects.filter(rank__gt=rank):\n profile.rank -= 1\n profile.save()\n\n # also need to alter the pending matches\n for challenge in Match.objects.filter(\n played__isnull=True\n ).filter(\n Q(opponent__userprofile__rank__gt=rank) | Q(challenger__userprofile__rank__gt=rank)\n ):\n if challenge.challenger_rank > rank:\n challenge.challenger_rank -= 1\n\n if challenge.opponent_rank > rank:\n challenge.opponent_rank -= 1\n\n challenge.save()\n\n # set movement to 100 (balled)\n self.movement = 100\n self.save()\n return\n\n self.movement = rank - self.rank\n self.rank = rank\n self.save()",
"def modify_score(self, score):\n self.opponents[0][1] += float(score)\n self.opponents[1][1] += 1 - float(score)\n if float(score) == 1:\n self.winner = f\"{self.opponents[0][0].last_name} {self.opponents[0][0].first_name}\"\n elif float(score) == 0.5:\n self.winner = \"match nul\"\n else:\n self.winner = f\"{self.opponents[1][0].last_name} {self.opponents[1][0].first_name}\"",
"def test_mean_rank_correct_value(self) -> None:\n\n engine = mock.Mock()\n\n engine.user_factors = pd.DataFrame(\n {0: [0.5, 0.1, 0.9], 1: [0.1, 0.2, 0.5]}, index=[\"u1\", \"u2\", \"u3\"]\n )\n\n engine.item_factors = pd.DataFrame(\n {0: [0.1, 0.2, 0.3, 0.3], 1: [0.5, 0.9, 0.9, 0.7]},\n index=[\"i1\", \"i2\", \"i3\", \"i4\"],\n )\n\n test_interactions = pd.DataFrame(\n {\n \"user_id\": [\"u1\", \"u1\", \"u1\", \"u2\", \"u2\", \"u3\", \"u3\", \"u3\", \"u3\"],\n \"item_id\": [\"i1\", \"i2\", \"i4\", \"i2\", \"i3\", \"i1\", \"i2\", \"i3\", \"i4\"],\n \"feedback\": [4, 2, 1, 5, 10, 9, 8, 12, 8],\n }\n )\n\n # R =\n # 4 2 0 1\n # 0 5 10 0\n # 9 8 12 8\n\n # X * Y^T =\n # 1.00 0.75 0.25 0.50\n # 0.11 0.20 0.21 0.17\n # ą0.34 0.63 0.72 0.62\n\n # X * Y^T ranks\n # 1.00 0.75 0.25 0.50\n # 1.00 0.50 0.25 0.75\n # 1.00 0.50 0.25 0.75\n\n result = mean_rank(\n interactions=test_interactions,\n user_column=\"user_id\",\n item_column=\"item_id\",\n feedback_column=\"feedback\",\n engine=engine,\n )\n\n expected = 33 / 59\n self.assertEqual(result, expected)",
"def update_race_winner(self):\n self.data_source.update_race_winner(self.race)",
"def rankTest():\n # Set default rank and uncertainty.\n base = 1000\n rank = 500\n uncertainty = 0.15\n # Test Competitors.\n competitors = list()\n for i in range(1, 9):\n comp = {\"id\": \"Player_{0}\".format(i), \"rank\": (base + (rank * i)),\n \"uncertainty\": uncertainty}\n competitors.append(comp)\n\n print \"\\n\\n*****INITIAL*****\\n\\n\"\n for comp in competitors:\n print \"ID: {0}, Rank: {1}, Uncertainty: {2}\".format(comp[\"id\"],\n comp[\"rank\"], comp[\"uncertainty\"])\n\n # Loop and print the results of each iteration.\n for i in range(0, 10):\n compA = dict()\n compB = dict()\n while True:\n compA = choice(competitors)\n compB = choice(competitors)\n if not compA[\"id\"] == compB[\"id\"]:\n break\n# print \"\\nBEFORE {0}: {5}: {1}, {2}; {6}: {3}, {4}\".format(i + 1,\n# compA[\"rank\"], compA[\"uncertainty\"], compB[\"rank\"],\n# compB[\"uncertainty\"], compA[\"id\"], compB[\"id\"])\n compA, compB, winner_id = compete(compA, compB, None)\n# print \"AFTER {3}: Winner: {0}, A: {1}; {4}, B: {2}; {5}\".format(winner_id,\n# compA[\"rank\"], compB[\"rank\"], i + 1, compA[\"uncertainty\"],\n# compB[\"uncertainty\"])\n\n print \"\\n\\n*****RESULTS*****\\n\\n\"\n for comp in competitors:\n print \"ID: {0}, Rank: {1}, Uncertainty: {2}\".format(comp[\"id\"],\n comp[\"rank\"], comp[\"uncertainty\"])",
"def testOverallRankForCurrentRound(self):\n test_utils.set_competition_round()\n\n user = User(username=\"test_user\", password=\"changeme\")\n user.save()\n\n profile = user.get_profile()\n top_user = player_mgr.points_leader()\n profile.add_points(top_user.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile.save()\n\n self.assertEqual(profile.current_round_overall_rank(), 1,\n \"Check that the user is number 1.\")\n\n user2 = User(username=\"test_user2\", password=\"changeme\")\n user2.save()\n\n profile2 = user2.get_profile()\n profile2.add_points(profile.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile2.save()\n\n self.assertEqual(profile.current_round_overall_rank(), 2,\n \"Check that the user is now number 2.\")",
"def rank_dependent_metrics(self):\n rank = self.last_extr_aut.nbS\n self.ranks.append(rank)\n print(\"Metrics for rank {0} :\".format(rank))\n self.y_test_extr = [self.last_extr_aut.val(w) for w in self.x_test]\n self.y_rnnw_extr = [self.last_extr_aut.val(w) for w in self.x_rnnw]\n self.y_test_extr_prefixes = proba_all_prefixes_aut(self.last_extr_aut, self.x_test)\n self.y_rnnw_extr_prefixes = proba_all_prefixes_aut(self.last_extr_aut, self.x_rnnw)\n self.kld_test_rnn_extr = scores.kullback_leibler(self.y_test_rnn, self.fix_probas(self.y_test_extr))\n self.ndcg1_test_rnn_extr = scores.ndcg(self.x_test, self.rnn_model, self.last_extr_aut, ndcg_l=1,\n dic_ref=self.y_test_rnn_prefixes, dic_approx=self.y_test_extr_prefixes)\n self.ndcg1_rnnw_rnn_extr = scores.ndcg(self.x_rnnw, self.rnn_model, self.last_extr_aut, ndcg_l=1,\n dic_ref=self.y_rnnw_rnn_prefixes, dic_approx=self.y_rnnw_extr_prefixes)\n self.ndcg5_test_rnn_extr = scores.ndcg(self.x_test, self.rnn_model, self.last_extr_aut, ndcg_l=5,\n dic_ref=self.y_test_rnn_prefixes, dic_approx=self.y_test_extr_prefixes)\n self.ndcg5_rnnw_rnn_extr = scores.ndcg(self.x_rnnw, self.rnn_model, self.last_extr_aut, ndcg_l=5,\n dic_ref=self.y_rnnw_rnn_prefixes, dic_approx=self.y_rnnw_extr_prefixes)\n t, e = scores.wer_aut(self.last_extr_aut, self.x_test)\n self.wer_test_extr = e / t\n t, e = scores.wer_aut(self.last_extr_aut, self.x_rnnw)\n self.wer_rnnw_extr = e / t\n self.eps_test_zeros_extr = len([x for x in self.y_test_extr if x <= 0.0]) / len(self.y_test_extr)\n self.eps_rnnw_zeros_extr = len([x for x in self.y_rnnw_extr if x <= 0.0]) / len(self.y_rnnw_extr)\n self.perprnn_test_extr = scores.pautomac_perplexity(self.y_test_rnn, self.fix_probas(self.y_test_extr))\n self.perprnn_rnnw_extr = scores.pautomac_perplexity(self.y_rnnw_rnn, self.fix_probas(self.y_rnnw_extr))\n\n if self.metrics_calc_level > 1:\n self.y_rand_extr = [self.last_extr_aut.val(w) for w in self.x_rand]\n self.perp_test_extr = scores.pautomac_perplexity(self.y_test_target, self.fix_probas(self.y_test_extr))\n self.kld_test_target_extr = scores.kullback_leibler(self.y_test_target, self.fix_probas(self.y_test_extr))\n self.ndcg1_test_target_extr = scores.ndcg(self.x_test, self.true_automaton, self.last_extr_aut, ndcg_l=1,\n dic_ref=self.y_test_target_prefixes,\n dic_approx=self.y_test_extr_prefixes)\n self.ndcg5_test_target_extr = scores.ndcg(self.x_test, self.true_automaton, self.last_extr_aut, ndcg_l=5,\n dic_ref=self.y_test_target_prefixes,\n dic_approx=self.y_test_extr_prefixes)\n self.perp_rand_extr = scores.pautomac_perplexity(self.y_rand_target, self.fix_probas(self.y_rand_extr))\n self.kld_rand_rnn_extr = scores.kullback_leibler(self.fix_probas(self.y_rand_rnn),\n self.fix_probas(self.y_rand_extr))\n self.kld_rand_extr_rnn = scores.kullback_leibler(self.y_rand_extr, self.fix_probas(self.y_rand_rnn))\n self.kld_rand_target_extr = scores.kullback_leibler(self.y_rand_target, self.fix_probas(self.y_rand_extr))\n self.eps_kl_rand_target_extr = neg_zero(self.y_rand_extr, self.y_rand_target)\n self.eps_rand_zeros_extr = len([x for x in self.y_rand_extr if x <= 0.0]) / len(self.y_rand_extr)\n # self.l2dis_target_extr = scores.l2dist(self.true_automaton, extr_aut, l2dist_method=\"gramian\")\n\n # pr(self.quiet, \"\\tEvaluating words and prefixes...\")\n # pr(self.quiet, \"\\tRank-dependent metrics...\")\n\n self.metrics[(rank, \"perp-test-extr\")] = self.perp_test_extr\n self.metrics[(rank, \"perp-test-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"perp-rand-extr\")] = 
self.perp_rand_extr\n self.metrics[(rank, \"perp-rand-extr-eps\")] = self.eps_rand_zeros_extr\n self.metrics[(rank, \"kld-test-rnn-extr\")] = self.kld_test_rnn_extr\n self.metrics[(rank, \"kld-test-rnn-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"kld-test-target-extr\")] = self.kld_test_target_extr\n self.metrics[(rank, \"kld-test-target-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"kld-rand-rnn-extr\")] = self.kld_rand_rnn_extr\n self.metrics[(rank, \"kld-rand-rnn-extr-eps\")] = self.eps_rand_zeros_extr\n self.metrics[(rank, \"kld-rand-extr-rnn\")] = self.kld_rand_extr_rnn\n self.metrics[(rank, \"kld-rand-target-extr\")] = self.kld_rand_target_extr\n self.metrics[(rank, \"kld-rand-target-extr-eps\")] = self.eps_rand_zeros_extr\n self.metrics[(rank, \"(1-wer)-test-extr\")] = (1 - self.wer_test_extr if self.wer_test_extr is not None else None)\n self.metrics[(rank, \"(1-wer)-rnnw-extr\")] = (1 - self.wer_rnnw_extr if self.wer_rnnw_extr is not None else None)\n self.metrics[(rank, \"ndcg1-test-rnn-extr\")] = self.ndcg1_test_rnn_extr\n self.metrics[(rank, \"ndcg1-test-target-extr\")] = self.ndcg1_test_target_extr\n self.metrics[(rank, \"ndcg1-rnnw-rnn-extr\")] = self.ndcg1_rnnw_rnn_extr\n self.metrics[(rank, \"ndcg5-test-rnn-extr\")] = self.ndcg5_test_rnn_extr\n self.metrics[(rank, \"ndcg5-test-target-extr\")] = self.ndcg5_test_target_extr\n self.metrics[(rank, \"ndcg5-rnnw-rnn-extr\")] = self.ndcg5_rnnw_rnn_extr\n # self.metrics[(rank, \"l2dis-target-extr\")] = self.l2dis_target_extr\n self.metrics[(rank, \"perprnn-test-rnn\")] = self.perprnn_test_rnn\n self.metrics[(rank, \"perprnn-test-extr-eps\")] = self.eps_test_zeros_extr\n self.metrics[(rank, \"perprnn-test-extr\")] = self.perprnn_test_extr\n self.metrics[(rank, \"perprnn-rnnw-rnn\")] = self.perprnn_rnnw_rnn\n self.metrics[(rank, \"perprnn-rnnw-extr-eps\")] = self.eps_rnnw_zeros_extr\n self.metrics[(rank, \"perprnn-rnnw-extr\")] = self.perprnn_rnnw_extr",
"def _calculate_relative_ranking(self, n_feat, tentative, confirmed, imp_history):\n # ranking, confirmed variables are rank 1\n self.ranking_ = np.ones(n_feat, dtype=int)\n # tentative variables are rank 2\n self.ranking_[tentative] = 2\n selected = np.hstack((confirmed, tentative))\n # all rejected features are sorted by importance history\n not_selected = np.setdiff1d(np.arange(n_feat), selected)\n # large importance values should rank higher = lower ranks -> *(-1)\n imp_history_rejected = imp_history[1:, not_selected] * -1\n\n # update rank for not_selected features\n if not_selected.shape[0] > 0:\n # calculate ranks in each iteration, then median of ranks across feats\n iter_ranks = self._nanrankdata(imp_history_rejected, axis=1)\n rank_medians = np.nanmedian(iter_ranks, axis=0)\n ranks = self._nanrankdata(rank_medians, axis=0)\n\n # set smallest rank to 3 if there are tentative feats\n if tentative.shape[0] > 0:\n ranks = ranks - np.min(ranks) + 3\n else:\n # and 2 otherwise\n ranks = ranks - np.min(ranks) + 2\n self.ranking_[not_selected] = ranks\n else:\n # all are selected, thus we set feature supports to True\n self.support_ = np.ones(n_feat, dtype=bool)",
"def RankPlayers(players):\r\n #Weights:\r\n WIN_PER = 10\r\n AVG_PTS = 4 \r\n AVG_DIFF = 1\r\n TM_WIN_PER = -3\r\n GP = -1\r\n OPP_WIN_PER = 3 \r\n ranks = []\r\n initorder = []\r\n\r\n for i in range(len(players)): #Creating Rank List\r\n ranks.append([players[i][0]])\r\n initorder.append(players[i][0])\r\n players[i][6] = players[i][6] / players[i][3] #Average teammate gp \r\n players[i][8] = players[i][8] / players[i][3] #average opp gp\r\n for _ in range(10): #win %, GP rank, avgPts %, team win %, Teammate GP Rank, opp win %, Opp GP Rank, Wins, Losses, Avg Diff\r\n ranks[i].append(0)\r\n #Easy transfer Data\r\n ranks[i][1] = round(players[i][1]/players[i][3],3)\r\n ranks[i][3] = round(players[i][4]/10,3)\r\n ranks[i][4] = players[i][5]\r\n ranks[i][6] = players[i][7]\r\n ranks[i][8] = players[i][1]\r\n ranks[i][9] = players[i][2]\r\n ranks[i][10] = players[i][9]/10 #Dividing by 10 to get a good multiplier\r\n\r\n #GP rank normalized\r\n players.sort(key=lambda x: x[3], reverse=True) #descending order as to create negative percentile\r\n for i in range(len(players)):\r\n ranks[initorder.index(players[i][0])][2] = round(1/(players[i][3]/players[0][3]),2)\r\n if players[i][3] < 5: #Not enough samples\r\n ranks[initorder.index(players[i][0])].append(10)\r\n elif players[i][3] < 10: #Still not enough samples\r\n ranks[initorder.index(players[i][0])].append(4)\r\n else: #Enough games played\r\n ranks[initorder.index(players[i][0])].append(0)\r\n\r\n #Teammate GP rank normalized\r\n players.sort(key=lambda x: x[6]) \r\n for i in range(len(players)):\r\n ranks[initorder.index(players[i][0])][5] = round((i+1)/len(players),2)\r\n\r\n #opp GP rank normalized\r\n players.sort(key=lambda x: x[8]) #ascending order as to create positive precentile\r\n for i in range(len(players)):\r\n ranks[initorder.index(players[i][0])][7] = round((i+1)/len(players),2)\r\n \r\n for i in range(len(ranks)):\r\n rawscore = ranks[i][1] * WIN_PER + ranks[i][11] * GP + ranks[i][3] * AVG_PTS + ranks[i][4] * TM_WIN_PER + ranks[i][6] * OPP_WIN_PER + ranks[i][10] * AVG_DIFF\r\n ranks[i].append(rawscore)\r\n #THEORETICAL MAX SCORE: 19.5\r\n ranks[i][1] = ranks[i][1] * 100 #Adjusting to readable format\r\n ranks[i][4] = ranks[i][4] * 100\r\n ranks[i][6] = ranks[i][6] * 100\r\n ranks[i][3] = ranks[i][3] * 10\r\n ranks[i][10] = ranks[i][10] * 10\r\n ranks[i][2] = len(ranks) - int(round(ranks[i][2] * len(ranks),0)) \r\n ranks[i][5] = len(ranks) - int(round(ranks[i][5] * len(ranks),0)) + 1\r\n ranks[i][7] = len(ranks) - int(round(ranks[i][7] * len(ranks),0)) + 1\r\n\r\n ranks.sort(key=lambda x: x[2],reverse=True) #Fixing GP Rank\r\n for i in range(len(ranks)):\r\n ranks[i][2] = i + 1\r\n\r\n #Final Ranking\r\n ranks.sort(key=lambda x: x[12],reverse=True) \r\n data={'Name':[i[0] for i in ranks], 'WINS':[i[8] for i in ranks], 'LOSSES':[i[9] for i in ranks],\r\n 'WIN %': [i[1] for i in ranks],'GP Rank':[i[2] for i in ranks],\r\n \"AVG PTS\":[i[3] for i in ranks],\"AVG DIFF\":[i[10] for i in ranks],\r\n \"AVG TM WIN %\":[i[4] for i in ranks],\"AVG TM GP Rank\":[i[5] for i in ranks],\"AVG OPP WIN %\":[i[6] for i in ranks],\"AVG OPP GP Rank\":[i[7] for i in ranks],\r\n \"Ranking Score\":[i[12] for i in ranks]}\r\n #Note: Rankings of GP, TM GP, and OPP GM: 1 means most games played, last means least games played\r\n result=pd.DataFrame(data=data)\r\n result=round(result,4)\r\n result.index += 1\r\n print(result) \r\n\r\n result = result.drop([\"WIN %\", \"GP Rank\", \"AVG TM GP Rank\", \"AVG OPP GP Rank\", \"Ranking Score\"], 
axis=1)\r\n result.to_csv(\"Standings/IndividualRankings.csv\")\r\n\r\n return None",
"def test_convert_vals_to_spearman_ranks(self):\n \n #Example from Spearman Wikipedia page\n #http://en.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient\n #TODO: add some examples from more formal sources\n \n \n unordered_vals = [1.2,0.8,1.2,18,2.3]\n exp_ranks = [3.5,5,3.5,1,2]\n \n obs = convert_vals_to_spearman_ranks(unordered_vals)\n self.assertFloatEqual(obs,exp_ranks)",
"def test_rank_challenge_greater_then_2(self):\n self.opponent.userprofile.rank = 1\n self.challenger.userprofile.rank = 4\n self.opponent.save()\n self.challenger.save()\n self.assertFalse(self.opponent.userprofile.can_challenge(self.challenger))",
"def testOverallRankWithPoints(self):\n user = User(username=\"test_user\", password=\"changeme\")\n user.save()\n profile = user.get_profile()\n\n # Check if the rank works if the user has done nothing.\n rank = 1\n self.assertEqual(profile.overall_rank(), rank,\n \"Check that the user is at least tied for last.\")\n\n # Make the user ranked 1st.\n top_user = Profile.objects.all()[0]\n profile.add_points(top_user.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile.save()\n\n self.assertEqual(profile.overall_rank(), 1,\n \"Check that the user is number 1.\")\n\n user2 = User(username=\"test_user2\", password=\"changeme\")\n user2.save()\n\n profile2 = user2.get_profile()\n profile2.add_points(profile.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile2.save()\n\n self.assertEqual(profile.overall_rank(), 2,\n \"Check that the user is now rank 2.\")",
"def reinforce3(winner,history,moveScoreDict,winWeight = 1,loseWeight=-1,drawWeight=0):\n if winner==1:\n winPlayer = \"X\"\n losePlayer = \"O\"\n drawPlayer1 = \"-\"\n drawPlayer2 = \"-\"\n elif winner==2:\n winPlayer = \"O\"\n losePlayer = \"X\"\n drawPlayer1 = \"-\"\n drawPlayer2 = \"-\"\n elif winner==3:\n winPlayer = \"-\"\n losePlayer = \"-\"\n drawPlayer1 = \"X\"\n drawPlayer2 = \"O\"\n gamma = 0.05\n if winPlayer != \"-\":\n for i,(b,m) in enumerate(history[winPlayer]):\n moveScoreDict[b][\"timesSeen\"] += 1\n moveScoreDict[b][\"scores\"][m] += winWeight*(gamma**(len(history[winPlayer])-1-i))\n \n if losePlayer != \"-\":\n for i,(b,m) in enumerate(history[losePlayer]):\n moveScoreDict[b][\"timesSeen\"] += 1\n moveScoreDict[b][\"scores\"][m] += loseWeight*(gamma**(len(history[losePlayer])-1-i))\n\n if drawPlayer1 != \"-\":\n for i,(b,m) in enumerate(history[drawPlayer1]):\n moveScoreDict[b][\"timesSeen\"] += 1\n moveScoreDict[b][\"scores\"][m] += drawWeight*(gamma**(len(history[drawPlayer1])-1-i))\n if drawPlayer2 != \"-\":\n for i,(b,m) in enumerate(history[drawPlayer2]):\n moveScoreDict[b][\"timesSeen\"] += 1\n moveScoreDict[b][\"scores\"][m] += drawWeight*(gamma**(len(history[drawPlayer2])-1-i))\n return moveScoreDict",
"def checkPlayerRank(self):\n numOfNan = self.matches[self.matches['winner_rank'].isnull() | self.matches['loser_rank'].isnull()].shape[0]\n print(\"Sanity checking winner_rank and loser_rank: \" + str(numOfNan))\n\n \"\"\"Fill NaN players rank with 2000 which represents really high rank\"\"\"\n self.matches['winner_rank'] = self.matches['winner_rank'].fillna(2000)\n self.matches['loser_rank'] = self.matches['loser_rank'].fillna(2000)",
"def ranking_actors_influence():\n reader = initialize_reader()\n actor_list = [{\"name_actor\": row[10], \"number_influence\": int(row[7])} for row in reader]\n actor_for = list(actor_list)\n actors = []\n for actor in actor_for:\n if actor.get('name_actor') not in (list(x.get('name_actor') for x in actors)):\n actors.append({\"name_actor\": actor.get('name_actor'), \"number_influence\": actor.get('number_influence')})\n else:\n actor_for.remove(actor)\n new_list = sorted(actors, key=lambda i: i['number_influence'], reverse=True)\n ranking_ten_list = new_list[:10]\n rank = 0\n print(\"\\nRanking actors social Media influence \\n\")\n for actor in ranking_ten_list:\n rank = rank + 1\n print(f\"Rank {rank} is {actor.get('name_actor')} with {actor.get('number_influence')} followers\")",
"def update_user_ranking(winner_id,movie_ranks,user_id): \n #winning movie rank increases by 1\n movie_ranks[winner_id] += 1\n #update sql database with new rankings\n cnx = mysql.connector.connect(user='root', password='Tnci12!UHbs94',\n database = 'moviematchupdb', host='localhost')\n winner = cnx.cursor()\n winner_movie = (\"\"\"UPDATE user_rankings\n SET movie_rank = %s\n WHERE user_id = %s and movie_id = %s;\"\"\")\n winner.execute(winner_movie,(movie_ranks[winner_id],user_id,winner_id))\n cnx.commit()\n winner.close()\n cnx.close()",
"def update(self, winner: Genotype, loser: Genotype):\n for a, w, l in zip(self._genome, winner, loser):\n a.update(winner=w, loser=l)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests the compete() function and checks that the ranks are properly updated for each competitor. This is a manual test, meaning that with the exception of errors being raised, the test won't fail. Results will be output to the terminal for analysis.
|
def rankTest():
# Set default rank and uncertainty.
base = 1000
rank = 500
uncertainty = 0.15
# Test Competitors.
competitors = list()
for i in range(1, 9):
comp = {"id": "Player_{0}".format(i), "rank": (base + (rank * i)),
"uncertainty": uncertainty}
competitors.append(comp)
print "\n\n*****INITIAL*****\n\n"
for comp in competitors:
print "ID: {0}, Rank: {1}, Uncertainty: {2}".format(comp["id"],
comp["rank"], comp["uncertainty"])
# Loop and print the results of each iteration.
for i in range(0, 10):
compA = dict()
compB = dict()
while True:
compA = choice(competitors)
compB = choice(competitors)
if not compA["id"] == compB["id"]:
break
# print "\nBEFORE {0}: {5}: {1}, {2}; {6}: {3}, {4}".format(i + 1,
# compA["rank"], compA["uncertainty"], compB["rank"],
# compB["uncertainty"], compA["id"], compB["id"])
compA, compB, winner_id = compete(compA, compB, None)
# print "AFTER {3}: Winner: {0}, A: {1}; {4}, B: {2}; {5}".format(winner_id,
# compA["rank"], compB["rank"], i + 1, compA["uncertainty"],
# compB["uncertainty"])
print "\n\n*****RESULTS*****\n\n"
for comp in competitors:
print "ID: {0}, Rank: {1}, Uncertainty: {2}".format(comp["id"],
comp["rank"], comp["uncertainty"])
|
[
"def testRanks(self): # unit test for ranks 1-13\r\n \r\n for i in range(1,14):\r\n myCard = Card(i,'c') # create i of clubs\r\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is i\r",
"def test_computer_loop(self):\n\n s = 0\n for i in range(100):\n game = mastermind.ComputerPlayer()\n self.assertEqual(game.play_mastermind(), True)\n s += game.get_count_guesses()\n print(\"Średnia ilość strzałów potrzebnych od odgadnięcia kodu\\n Sprawność: \", s/100)",
"def testRanks(self): #GIVEN\n \n for i in range(2,15):\n myCard = Card(i,'c')\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is 'i'",
"def test_strategy(self):\n # Become grudged if the opponent defects twice in a row\n self.responses_test([], [], [C], attrs={\"grudged\": False})\n self.responses_test([C], [C], [C], attrs={\"grudged\": False})\n self.responses_test([C, C], [C, C], [C], attrs={\"grudged\": False})\n self.responses_test([C, C, C], [C, C, D], [C], attrs={\"grudged\": False})\n self.responses_test([C, C, C, C], [C, C, D, D], [D],\n attrs={\"grudged\": True})\n\n mem_length = self.player().mem_length\n for i in range(mem_length - 1):\n self.responses_test([C, C, C, C] + [D] * i, [C, C, D, D] + [D] * i,\n [D], attrs={\"grudged\": True,\n \"grudge_memory\": i})\n i = mem_length + 1\n self.responses_test([C, C, C, C] + [D] * i, [C, C, D, D] + [C] * i,\n [C], attrs={\"grudged\": False,\n \"grudge_memory\": 0})",
"def test_player_opponent():\n\n time.sleep(1)\n example_player = Player(headers=HEADERS,\n endpoint='playerdashboardbyopponent')\n\n table_names = example_player.data.keys()\n\n assert 'OverallPlayerDashboard' in table_names\n assert 'ConferencePlayerDashboard' in table_names\n assert 'DivisionPlayerDashboard' in table_names\n assert 'OpponentPlayerDashboard' in table_names\n\n example_overall = example_player.data['OverallPlayerDashboard'][0]\n example_conference = example_player.data['ConferencePlayerDashboard'][0]\n example_division = example_player.data['DivisionPlayerDashboard'][0]\n example_opponent = example_player.data['OpponentPlayerDashboard'][0]\n\n columns = ['GROUP_SET',\n 'GROUP_VALUE',\n 'GP',\n 'W',\n 'L',\n 'W_PCT',\n 'MIN',\n 'FGM',\n 'FGA',\n 'FG_PCT',\n 'FG3M',\n 'FG3A',\n 'FG3_PCT',\n 'FTM',\n 'FTA',\n 'FT_PCT',\n 'OREB',\n 'DREB',\n 'REB',\n 'AST',\n 'TOV',\n 'STL',\n 'BLK',\n 'BLKA',\n 'PF',\n 'PFD',\n 'PTS',\n 'PLUS_MINUS',\n 'NBA_FANTASY_PTS',\n 'DD2',\n 'TD3',\n 'GP_RANK',\n 'W_RANK',\n 'L_RANK',\n 'W_PCT_RANK',\n 'MIN_RANK',\n 'FGM_RANK',\n 'FGA_RANK',\n 'FG_PCT_RANK',\n 'FG3M_RANK',\n 'FG3A_RANK',\n 'FG3_PCT_RANK',\n 'FTM_RANK',\n 'FTA_RANK',\n 'FT_PCT_RANK',\n 'OREB_RANK',\n 'DREB_RANK',\n 'REB_RANK',\n 'AST_RANK',\n 'TOV_RANK',\n 'STL_RANK',\n 'BLK_RANK',\n 'BLKA_RANK',\n 'PF_RANK',\n 'PFD_RANK',\n 'PTS_RANK',\n 'PLUS_MINUS_RANK',\n 'NBA_FANTASY_PTS_RANK',\n 'DD2_RANK',\n 'TD3_RANK',\n 'CFID',\n 'CFPARAMS']\n\n assert list(example_overall.keys()) == columns\n\n assert list(example_conference.keys()) == columns\n\n assert list(example_division.keys()) == columns\n\n assert list(example_opponent.keys()) == columns",
"def compete(comp1, comp2, winner_id=None, decr_uncertainty=0.002,\n min_uncertainty=0.05):\n # Check that both competitors are valid.\n try:\n isValidCompetitor(comp1)\n isValidCompetitor(comp2)\n except:\n raise TypeError(\"Invalid competitor\")\n\n # If a winner_id isn't specified, one is semi-randomly determined.\n if not winner_id or not winner_id in [comp1[\"id\"], comp2[\"id\"]]:\n favored = dict()\n unfavored = dict()\n if comp1[\"rank\"] > comp2[\"rank\"]:\n favored = comp1\n unfavored = comp2\n else:\n favored = comp2\n unfavored = comp1\n if random.random() >= (favored[\"rank\"] / (favored[\"rank\"] +\n unfavored[\"rank\"])):\n winner_id = unfavored[\"id\"]\n else:\n winner_id = favored[\"id\"]\n\n # Assign the winner and loser\n winner = dict()\n loser = dict()\n if winner_id == comp1[\"id\"]:\n winner = comp1\n loser = comp2\n else:\n winner = comp2\n loser = comp1\n\n # Update ranks and uncertainties\n winner, loser = updateRanks(winner, loser, decr_uncertainty=decr_uncertainty,\n min_uncertainty=min_uncertainty)\n\n if winner[\"id\"] == comp1[\"id\"]:\n return winner, loser, winner_id\n return loser, winner, winner_id",
"def test_rank_challenge_within_2(self):\n self.opponent.userprofile.rank = 1\n self.challenger.userprofile.rank = 2\n self.opponent.save()\n self.challenger.save()\n self.assertTrue(self.opponent.userprofile.can_challenge(self.challenger))",
"def test_rank_challenge_greater_then_2(self):\n self.opponent.userprofile.rank = 1\n self.challenger.userprofile.rank = 4\n self.opponent.save()\n self.challenger.save()\n self.assertFalse(self.opponent.userprofile.can_challenge(self.challenger))",
"def test_compare_number_of_receptions(self):\n for name, receptions in self.known_number_of_receptions:\n result = player_stats.get(name).receptions\n self.assertEqual(receptions, result)",
"def test_cli_change_number_of_computer_players(engine):\n assert engine.ui.seats == 7 + 2\n assert len(engine.playerlist) == 7 + 1",
"def updateRanks(winner, loser, decr_uncertainty=0.002, min_uncertainty=0.05):\n # Check that both competitors are valid.\n try:\n isValidCompetitor(winner)\n isValidCompetitor(loser)\n except:\n raise TypeError(\"Invalid competitor\")\n\n # Determine the favored competitor.\n favored = None\n favored_rank = 0\n unfavored_rank = 0\n if winner[\"rank\"] > loser[\"rank\"]:\n favored = winner[\"id\"]\n favored_rank = winner[\"rank\"]\n unfavored_rank = loser[\"rank\"]\n else:\n favored = loser[\"id\"]\n favored_rank = loser[\"rank\"]\n unfavored_rank = winner[\"rank\"]\n\n # Update winner's rank\n uncertainty = 0.10 # Default uncertainty value.\n if \"uncertainty\" in winner:\n uncertainty = winner[\"uncertainty\"]\n if uncertainty > (min_uncertainty + decr_uncertainty):\n winner[\"uncertainty\"] = uncertainty - decr_uncertainty\n else:\n winner[\"uncertainty\"] = min_uncertainty\n if favored == winner[\"id\"]:\n winner[\"rank\"] = favored_rank + (uncertainty * unfavored_rank)\n else:\n winner[\"rank\"] = unfavored_rank + (uncertainty * favored_rank)\n\n # Update loser's rank\n uncertainty = 0.10 # Default uncertainty value.\n if \"uncertainty\" in loser:\n uncertainty = loser[\"uncertainty\"]\n if uncertainty > (min_uncertainty + decr_uncertainty):\n loser[\"uncertainty\"] = uncertainty - decr_uncertainty\n else:\n loser[\"uncertainty\"] = min_uncertainty\n if favored == loser[\"id\"]:\n loser[\"rank\"] = favored_rank - (uncertainty * favored_rank)\n else:\n loser[\"rank\"] = unfavored_rank - (uncertainty * unfavored_rank)\n\n return winner, loser",
"def test_suit_follow(self, hands, round):\n # first player plays a club\n round.play_card(0, hands[0][0])\n\n # second player must also play a club because he has one\n with pytest.raises(exception.InvalidCard):\n round.play_card(1, hands[1][2])\n round.play_card(1, hands[1][0])\n\n # third player does not have a club and may play any card\n round.play_card(2, hands[2][0])\n\n # fourth play must play a club because he has one\n with pytest.raises(exception.InvalidCard):\n round.play_card(3, hands[3][1])\n round.play_card(3, hands[3][0])",
"def crapsSim(handsPerGame, numGames):\n games = []\n #Play numGames games\n for t in range(numGames):\n c = CrapsGame()\n for i in range(handsPerGame):\n c.playHand()\n games.append(c)\n #Produce statistics for each game\n pROIPerGame, dpROIPerGame = [], []\n for g in games:\n wins, losses = g.passResults()\n pROIPerGame.append((wins - losses)/float(handsPerGame))\n wins, losses, pushes = g.dpResults()\n dpROIPerGame.append((wins - losses)/float(handsPerGame))\n #Produce and print summary statistics\n meanROI = str(round((100*sum(pROIPerGame)/numGames), 4)) + '%'\n sigma = str(round(100*stdDev(pROIPerGame), 4)) + '%'\n print('Pass:', 'Mean ROI =', meanROI, 'Std. Dev. =', sigma)\n meanROI = str(round((100*sum(dpROIPerGame)/numGames), 4)) +'%'\n sigma = str(round(100*stdDev(dpROIPerGame), 4)) + '%'\n print('Don\\'t pass:','Mean ROI =', meanROI, 'Std Dev =', sigma)",
"def testOverallRankWithPoints(self):\n user = User(username=\"test_user\", password=\"changeme\")\n user.save()\n profile = user.get_profile()\n\n # Check if the rank works if the user has done nothing.\n rank = 1\n self.assertEqual(profile.overall_rank(), rank,\n \"Check that the user is at least tied for last.\")\n\n # Make the user ranked 1st.\n top_user = Profile.objects.all()[0]\n profile.add_points(top_user.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile.save()\n\n self.assertEqual(profile.overall_rank(), 1,\n \"Check that the user is number 1.\")\n\n user2 = User(username=\"test_user2\", password=\"changeme\")\n user2.save()\n\n profile2 = user2.get_profile()\n profile2.add_points(profile.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile2.save()\n\n self.assertEqual(profile.overall_rank(), 2,\n \"Check that the user is now rank 2.\")",
"def testOverallRankForCurrentRound(self):\n test_utils.set_competition_round()\n\n user = User(username=\"test_user\", password=\"changeme\")\n user.save()\n\n profile = user.get_profile()\n top_user = player_mgr.points_leader()\n profile.add_points(top_user.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile.save()\n\n self.assertEqual(profile.current_round_overall_rank(), 1,\n \"Check that the user is number 1.\")\n\n user2 = User(username=\"test_user2\", password=\"changeme\")\n user2.save()\n\n profile2 = user2.get_profile()\n profile2.add_points(profile.points() + 1, datetime.datetime.today(),\n \"Test\")\n profile2.save()\n\n self.assertEqual(profile.current_round_overall_rank(), 2,\n \"Check that the user is now number 2.\")",
"def crapsSim(handsPerGame, numGames):\n games = []\n #Play numGames games\n for t in xrange(numGames):\n c = CrapsGame()\n for i in xrange(handsPerGame):\n c.playHand()\n games.append(c)\n #Produce statistics for each game\n pROIPerGame, dpROIPerGame = [], []\n for g in games:\n wins, losses = g.passResults()\n pROIPerGame.append((wins - losses)/float(handsPerGame))\n wins, losses, pushes = g.dpResults()\n dpROIPerGame.append((wins - losses)/float(handsPerGame))\n \n #Produce and print summary statistics\n meanROI = str(round((100.0*sum(pROIPerGame)/numGames), 4)) + '%'\n sigma = str(round(100.0*stdDev(pROIPerGame), 4)) + '%'\n print 'Pass:', 'Mean ROI =', meanROI, 'Std. Dev. =', sigma\n meanROI = str(round((100.0*sum(dpROIPerGame)/numGames), 4)) + '%'\n sigma = str(round(100.0*stdDev(dpROIPerGame), 4)) + '%'\n print 'Don\\'t pass:','Mean ROI =', meanROI, 'Std Dev =', sigma",
"def test_correct_estimates(self):\n self.assertEqual(self.ajive.common.rank, 1)\n self.assertEqual(self.ajive.blocks['x'].individual.rank, 1)\n self.assertEqual(self.ajive.blocks['y'].individual.rank, 2)",
"def test_coco():\n assert (\n CocoSubmission().run(\n \"\"\"\nPlayer 1:\n9\n2\n6\n3\n1\n\nPlayer 2:\n5\n8\n4\n7\n10\n\n\"\"\".strip()\n )\n == 306\n )",
"def test_playerScore():\n \"\"\"Test playerScore function\"\"\"\n deck = [card.Card(0, 12), card.Card(1, 10), card.Card(2, 9)]\n game = lab09.Blackjack(deck)\n \n cornell.assert_equals(20, game.playerScore())\n game.playerHand = [card.Card(2, 2), card.Card(3, 1)]\n game.dealerHand = [card.Card(1, 13), card.Card(0, 3)]\n cornell.assert_equals(13, game.playerScore())\n \n print('The playerScore tests passed')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Count newsletters for this group
|
def get_count_newsletters(self, obj):
return obj.newsletter_set.count()
|
[
"def number_of_articles():",
"def test_number_emails_per_all_users(self):\n jotd.settings.DAYCOUNT = 5\n jotd.store_messages()\n \n curs.execute(\"SELECT COUNT(*) FROM jotd_emails\")\n msg_count = curs.fetchone()[0]\n self.assertEqual((jotd.settings.DAYCOUNT * len(self.recipients)), msg_count)",
"def test_number_emails_per_user(self):\n jotd.settings.DAYCOUNT = 10\n jotd.store_messages()\n \n curs.execute(\"SELECT COUNT(*) FROM jotd_emails WHERE msgRecipientAddress='{0}'\".format(jotd.settings.RECIPIENTS[0][1]))\n msg_count = curs.fetchone()[0]\n self.assertEqual(jotd.settings.DAYCOUNT, msg_count)",
"def count(self):\n return len(self.smtp.emails)",
"def count_articles(self) -> int:\n return len(self.data)",
"def get_newsletter_statistics(newsletter):\n\n all_status = Status.objects.filter(newsletter=newsletter)\n recipients = all_status.filter(status=Status.SENT).count()\n post_sending_status = all_status.filter(creation_date__gte=newsletter.sending_date)\n mails_sent = post_sending_status.filter(status=Status.SENT).count()\n if newsletter.status == newsletter.SENDING:\n max_recipients = newsletter.mailing_list.expedition_set().count()\n else:\n max_recipients = recipients\n\n statistics = {'tests_sent': all_status.filter(status=Status.SENT_TEST).count(),\n 'mails_sent': mails_sent,\n 'mails_to_send': recipients,\n 'remaining_mails': max_recipients - mails_sent}\n\n statistics.update(get_newsletter_opening_statistics(post_sending_status, recipients))\n statistics.update(get_newsletter_on_site_opening_statistics(post_sending_status))\n statistics.update(get_newsletter_unsubscription_statistics(post_sending_status, recipients))\n statistics.update(get_newsletter_clicked_link_statistics(post_sending_status, recipients,\n statistics['total_openings']))\n statistics.update(get_newsletter_top_links(post_sending_status))\n\n return statistics",
"def count_publishers(url):\n params = {'rows': 0}\n resp = requests.get(url=url, params=params)\n data = json.loads(resp.text)\n return data['message']['total-results']",
"def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n sub = get(url, allow_redirects=False, headers={'User-agent': ''}).json()\n\n sub_count = sub['data']['subscribers'] if 'data' in sub else 0\n\n return sub_count",
"async def count_daily_messages(self) -> int:\n\n def _count_messages(txn: LoggingTransaction) -> int:\n sql = \"\"\"\n SELECT COUNT(*) FROM events\n WHERE type = 'm.room.message'\n AND stream_ordering > ?\n \"\"\"\n txn.execute(sql, (self.stream_ordering_day_ago,))\n (count,) = cast(Tuple[int], txn.fetchone())\n return count\n\n return await self.db_pool.runInteraction(\"count_messages\", _count_messages)",
"def testTotalMessages(self):\n recpCt = len(recipients)\n daysOut = int(daycount.days)\n totEmails = recpCt * daysOut\n createMessages(recipients, startDate, daycount)\n curs.execute(\"select count(*) from jMessage;\")\n realEmailCt = curs.fetchone()[0]\n self.assertEqual(totEmails, realEmailCt, \"Email counts are inconsistent.\")",
"def count_posts_for_subdomain(self, subdomain):\n # Create the content directory for the Tumblr author, if needed\n content_dir = os.path.join(self.blu_pen_author.tumblr_content_dir, subdomain)\n if not os.path.exists(content_dir):\n os.makedirs(content_dir)\n\n # Get posts for the Tumblr author\n tumblr_author = TumblrAuthor(self.blu_pen_author, subdomain, content_dir)\n tumblr_author.set_posts()\n\n # Count post types for the Tumblr author\n regular = 0\n word = 0\n photo = 0\n for post in tumblr_author.posts:\n if post['type'] == \"regular\":\n regular += 1\n if \"regular-body\" in post and post['regular-body'] != None:\n word += len(self.author_utility.strip_html(post['regular-body']).split())\n elif post['type'] == \"photo\":\n photo += 1\n\n return {\"total\": len(tumblr_author.posts), \"regular\": regular, \"word\": word, \"photo\": photo}",
"def docids_count():",
"def number_of_subscribers(subreddit):\n url = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n headers = {'User-Agent': 'CustomClient/1.0'}\n r = requests.get(url, headers=headers, allow_redirects=False)\n if r.status_code != 200:\n return 0\n r = r.json()\n if 'data' in r:\n return r.get('data').get('subscribers')\n else:\n return 0",
"def numReplies(self):\n\n\t\tnumPosts = len(forum_post.objects.filter(thread = self))\n\n\t\tif numPosts == 0:\n\t\t\treturn 0\n\t\t#End\n\n\t\treturn numPosts - 1",
"def get_lessons_count(self):\n return Lesson.objects.filter(section__module__mnemo=self.mnemo).count()",
"def check_cp_cnt_group(group):\n group.post_count = group.posts.count()\n group.comment_count = group.comments.count()\n group.save()",
"def msg_count(self, domain):\n resp = self._make_request(endpoint=\"/api/3.0/domain/{domain}/msgs/count\".format(domain=domain))\n result = resp.get('count')\n return result",
"def count():\n click.echo('articles: {}'.format(Article.query.count()))\n click.echo('events: {}'.format(Event.query.count()))\n click.echo('stories: {}'.format(Story.query.count()))",
"def count_posts():\n return len(get_all_posts())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a file to the repository
|
def add_file(self, file_path):
self._repo.index.add([str(file_path)])
|
[
"def test_add_file(self):\n filename = \"quux\"\n file_path = os.path.join(self.repo, filename)\n with salt.utils.files.fopen(file_path, \"w\") as fp_:\n fp_.write(\n salt.utils.stringutils.to_str(\n \"This is a test file named {}.\\n\".format(filename)\n )\n )\n ret = self.run_function(\"git.add\", [self.repo, filename])\n self.assertEqual(ret, \"add '{}'\".format(filename))",
"def git_add(new_files_PATH):\n repo.git.add(new_files_PATH)",
"def _add_file(file_path):\n _db_content[\"files\"].append(file_path)",
"def git_add(filename):\n subprocess.call(['git', 'add', filename])",
"def test_add_file():\n author = repo.get_author_info()\n fastimport.start_commit('master', author, \"a commit\")\n fastimport.deleteall()\n testfile = os.path.join(repo.path, '.git', 'description')\n fastimport.add_file('./testfile',\n file(testfile),\n os.path.getsize(testfile))",
"def publish_add_file(self, pth, header=None, trans_id=None):\n\n try:\n self._frepo.add_file(trans_id, pth)\n except svr_repo.RepositoryError as e:\n raise tx.TransportOperationError(str(e))",
"def add(self, filename):\n self.index.add_new_file(filename)",
"def create_file(self, path, file):\n\t\ttry:\n\t\t\twith self.PUSH:\n\t\t\t\tself.repo.create_file(path, f\"Created {path}\", file)\n\t\t\t\tprint(f\"{self.get_emoji('ok')}Created {path}\")\n\t\texcept:\n\t\t\tprint(f\"There was an error to this file: {path}\")",
"def workspace_add_file(ctx, file_grp, file_id, mimetype, page_id, ignore, check_file_exists, force, fname):\n workspace = Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup)\n\n log = getLogger('ocrd.cli.workspace.add')\n if not mimetype:\n try:\n mimetype = EXT_TO_MIME[Path(fname).suffix]\n log.info(\"Guessed mimetype to be %s\" % mimetype)\n except KeyError:\n log.error(\"Cannot guess mimetype from extension '%s' for '%s'. Set --mimetype explicitly\" % (Path(fname).suffix, fname))\n\n log.debug(\"Adding '%s'\", fname)\n local_filename = None\n if not (fname.startswith('http://') or fname.startswith('https://')):\n if not fname.startswith(ctx.directory):\n if not isabs(fname) and exists(join(ctx.directory, fname)):\n fname = join(ctx.directory, fname)\n else:\n log.debug(\"File '%s' is not in workspace, copying\", fname)\n try:\n fname = ctx.resolver.download_to_directory(ctx.directory, fname, subdir=file_grp)\n except FileNotFoundError:\n if check_file_exists:\n log.error(\"File '%s' does not exist, halt execution!\" % fname)\n sys.exit(1)\n if check_file_exists and not exists(fname):\n log.error(\"File '%s' does not exist, halt execution!\" % fname)\n sys.exit(1)\n if fname.startswith(ctx.directory):\n fname = relpath(fname, ctx.directory)\n local_filename = fname\n\n if not page_id:\n log.warning(\"You did not provide '--page-id/-g', so the file you added is not linked to a specific page.\")\n workspace.add_file(file_grp, file_id=file_id, mimetype=mimetype, page_id=page_id, force=force, ignore=ignore, local_filename=local_filename, url=fname)\n workspace.save_mets()",
"def add_file_to_project(self, file, project):\n if project is not None and file is not None:\n LOGGER.debug(\"Adding item '%s' to project '%s'\" % (file.name,\n project.name))\n project.files.append(file)\n self.__SESSION.commit()\n LOGGER.debug(\"File has been added to project successfully!\")\n else:\n raise ValueError(\"Value of parameter 'file' and 'project'\\\n can't be None\")",
"def add_file(self, filename):\r\n file = open(filename, 'r', encoding='utf8', errors='ignore')\r\n text = file.read() # read it all in at once!\r\n file.close()\r\n self.add_string(text)",
"def add_file(self, fpath):\n if not os.path.isfile(fpath):\n print(\"cloudtalker: cannot find file\", fpath)\n return None\n #try to parse filename\n parsed = self.parse_filename(fpath)\n print(\"after parsing:\", parsed)\n if parsed is not None:\n fdata = {\n \"path\": fpath,\n \"type\": parsed[0],\n \"ts\": parsed[1],\n \"segno\": parsed[2],\n }\n self.inq.put(fdata)\n print(\"upload module accepted file\", fpath)",
"def add_file_to_instance(self, *, agent_name: str, instance_name: str, file_id: str, file_path: str) -> None:",
"def add(self, files): \n try:\n br = self.branch()\n assert br == self.INSTALLBRANCH\n except AssertionError:\n self.checkout(self.INSTALLBRANCH)\n return go(\"git add %s\" % files)",
"def add_file(self, root, filename):\n if filename in self.ignored_filenames:\n return\n\n item = File(os.path.join(root, filename, configuration=self.configuration))\n if root in self.__directory_index__:\n item.parent = self.__directory_index__[root]\n self.files.append(item)",
"def test_repo_create_file(self):\n pass",
"def add_file(self, filename):\n self.filenames.insert(0, filename)\n del self.filenames[self.max_no_files:]\n self.filenames = list(dict.fromkeys(self.filenames))\n self.settings.setValue('recent_files_list', [self.filenames])\n self.update_actions()",
"def add_ephemeral_file(self, name, content=None, path=None):\n if name in self.filenames:\n raise ValueError(\"File name '{}' has already been used\".format(name))\n cmd = self._repo._repo.git\n with TemporaryDirectory() as tmpdir:\n if path is None:\n # Write content to a temporary file\n assert content is not None\n path = os.path.join(tmpdir, name)\n with open(path, 'wb') as f:\n f.write(content)\n # Store file in the git object DB\n obj_id = cmd.hash_object('-w', path)\n # Add this file to the list of ephemeral files for this commit\n cmd.notes('--ref', self.FILE_LIST_REF, 'append', '-m', name, self.sha)\n # Add the file as a note\n cmd.notes('--ref', self.FILE_REF_BASE + name, 'add', '-f', '-C', obj_id, self.sha)\n # Clear cached properties so they get recalculated on next access\n del self.ephemeral_file_names\n del self.filenames",
"def add_file_to_cache(self, filename, datastore = None):\n if datastore is not None:\n fullpath = os.path.join(datastore, filename)\n else:\n fullpath = filename\n filename = os.path.basename(filename)\n\n hashval = hashlib.sha1(filename.encode()).hexdigest()\n shutil.copy(fullpath, os.path.join(self._get_local_repo_base_path, \"cache\", hashval[:2], filename))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Are there changes that need to be committed?
|
def has_changes(self):
return self._repo.is_dirty()
|
[
"def hasChanges(self):\n return self.changes",
"def has_pending_changes(self):\n status = self._execute(['git', 'status', '--porcelain',\n '--untracked-files=no',\n '--ignore-submodules=dirty'])\n return status != ''",
"def tracked_files_changed(self):\n\n # Check for unstaged changes. They may interfere with branch switching.\n if self.check_output_in_repo_dir(['git', 'diff']) != '':\n return True\n # Now check for staged changes. They might trigger avoidable merge conflicts when building a release branch.\n elif self.check_output_in_repo_dir(['git', 'diff', '--staged']) != '':\n return True\n return False",
"def can_update_commitments(self):\n return # boolean",
"def has_changes(self):\n return self.dynamic_changes != {}",
"def NeedCommit(self):\n return self._NeedCommit",
"def remote_dirty(self):\n return self.has_remote and any(\n (\n self.has_unpulled_commits,\n self.has_unpushed_commits,\n )\n )",
"def repo_is_dirty(self):\n output = subprocess.check_output(['git', 'status', '--porcelain'], cwd=self.repo_dir)\n\n return self.MODIFIED_FILE_REGEX.search(output.decode('utf-8')) is not None",
"def CheckHasUncommittedChanges(self):\n exit_code, output, _ = self.RunCommand('git status -s')\n if exit_code != 0:\n return False\n\n # Check if 'git status -s' yielded any output.\n for line in output.split('\\n'):\n if line:\n return True\n return False",
"def check_changes(self):\n if not self.has_local_repo():\n raise RuntimeError(\"No local repository connected. Aborting...\")\n self.changed_files = []\n self.removed_files = []\n # modified files\n diff = self.local_repo.index.diff(None)\n for d in diff:\n if d.change_type == 'D':\n self.removed_files.append(d.a_path)\n elif d.a_path == d.b_path:\n self.changed_files.append(d.a_path)\n else:\n UserWarning(\"Diff a_path != b_path ({} vs {})\".format(d.a_path, d.b_path))\n self.changed_files.append(d.a_path)\n self.changed_files.append(d.b_path)\n # new files\n for f in self.local_repo.untracked_files:\n if f in self.GIT_EXCLUDE:\n continue\n if any(f.startswith(rule) for rule in self.GIT_EXCLUDE):\n continue\n self.changed_files.append(f)\n # return just the answer (don't make the lists public)\n if self.changed_files or self.removed_files:\n return True\n else:\n return False",
"def check_modified(self):\n return bool(self._modified)",
"def isDoingBatchChanges(self):\r\n return self._batchChangeDepth > 0",
"def checkchanges(ctx, action=\"check uncommitted files\"):\n res = run(\"git status --porcelain\", hide=\"out\")\n if res.stdout != \"\":\n print(\"{} must be committed\".format(res))\n raise Exit(message=\"#### Uncommitted files found, you may need to {} ####\\n\".format(action))",
"def has_staged(repo):\n if repo.is_dirty(index=True, working_tree=False, untracked_files=False):\n return len(get_staged_filenames(repo)) > 0\n return False",
"def needs_sync(self):\n changes = ChangedEntityLocale.objects.filter(entity__resource__project=self)\n return changes.exists() or self.unsynced_locales",
"def commit(self):\n added = False\n while self._changes:\n st, reason, subtreefunc = self._changes.pop(0)\n if st not in self._states:\n st.index = len(self._ordered_states)\n self._ordered_states.append(st)\n self._states[st] = ChartItem(reason, subtreefunc)\n added = True\n else:\n self._states[st].add(reason, subtreefunc)\n\n return added",
"def hasModifiedFiles(self): #This is the prepared files?\n for qpackage in self.getQPackages():\n if qpackage.hasModifiedFiles():\n return True\n return False",
"def is_dirty(self):\n for _, prop in self._props.items():\n if prop.is_dirty:\n return True\n return False",
"def has_unstaged(repo):\n if repo.is_dirty(index=False, working_tree=True, untracked_files=False):\n return len(get_unstaged_filenames(repo)) > 0\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Roll back repository to previous commit, removing untracked files
|
def rollback(self):
self._repo.head.reset('HEAD~', working_tree=True)
for f in self.untracked_files:
os.remove(os.path.join(self._root, f))
|
[
"def rollback():\n with project():\n with update_changed_requirements():\n update = \"git checkout\" if env.git else \"hg up -C\"\n run(\"%s `cat last.commit`\" % update)\n with cd(join(static(), \"..\")):\n run(\"tar -xf %s\" % join(env.proj_path, \"last.tar\"))\n restore(\"last.db\")\n restart()",
"def DropUncommittedChanges(self):\n self.RunCommand('git stash')\n self.RunCommand('git stash drop')",
"def gitclean():\n local('git rm --cached `git ls-files -i -X .gitignore`')",
"def hard_reset(self):\n if self.repository.reset_head():\n self.index.reset(self.repository.head)\n self.working_directory.reset(self.index)",
"def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.completed and (len(self.already_pushed_repository) > 0):\n self.logger.error(\n \"Removing all pushed references : {}\".format(\n self.already_pushed_repository\n )\n )\n for repo, ref in self.already_pushed_repository:\n repo.handle.remotes.origin.push(\":\" + ref.path)\n\n if self.args.remove_temporary_folder:\n self.logger.info(\n \"Removing temporary folder {}\".format(self.tmp_dir_path)\n )\n rmtree(self.tmp_dir_path)",
"def decode():\n rev = os.popen('git rev-parse HEAD').read().strip()\n\n cmd = os.popen('git show HEAD --format=\"%s\" -s').read().strip()\n\n merge_commit = False\n if cmd != \"GIT_BOTTLE_STATE: unmerged-paths-normalization\":\n if cmd.startswith(SPECIAL_MERGE_COMMIT_PREFIX):\n merge_commit = True\n else:\n return\n\n params = ['git', 'reset', '--soft' ,'HEAD^']\n r = os.spawnvp(os.P_WAIT, 'git', params)\n if r != 0:\n print >>sys.stderr, \"git-reset failed (%d)\" % (r, )\n sys.exit(r)\n\n r = re.compile(\"(.*)[.]([0-9]+)[.]\" + SPECIAL_SUFFIX1 + '$')\n lines = os.popen('git ls-files --stage').read().splitlines()\n unmerged_paths = set()\n removals = set()\n updates = []\n for line in lines:\n meta, filename = line.split('\\t')\n mode, sha1hash, _ = meta.split(' ')\n m = r.match(filename)\n if not m:\n continue\n\n removals.add(filename)\n filename, stage = m.groups(0)\n updates += [\"%s %s %s\\t%s\" % (mode, sha1hash, stage, filename)]\n unmerged_paths.add(filename)\n\n checkouts = []\n for line in lines:\n meta, filename = line.split('\\t')\n mode, sha1hash, stage = meta.split(' ')\n if filename in unmerged_paths:\n checkouts += [filename]\n os.rename(filename, filename + SPECIAL_SUFFIX2)\n removals.add(filename)\n\n remove(removals)\n update_index(updates)\n for filename in checkouts:\n os.rename(filename + SPECIAL_SUFFIX2, filename)\n\n if merge_commit:\n # Restore the merge state\n reset([SPECIAL_FILENAME_MERGE_HEAD,\n SPECIAL_FILENAME_MERGE_MSG,\n SPECIAL_FILENAME_MERGE_MODE])\n os.rename(SPECIAL_FILENAME_MERGE_HEAD, \".git/MERGE_HEAD\")\n os.rename(SPECIAL_FILENAME_MERGE_MSG, \".git/MERGE_MSG\")\n os.rename(SPECIAL_FILENAME_MERGE_MODE, \".git/MERGE_MODE\")",
"def clean(ctx):\n ctx.run(\"git clean --interactive\", **RUN_ARGS)",
"def rollback(commit_id=None):\r\n if commit_id is not None:\r\n raise Exception('Rolling back to a specific commit-id is not yet supported')\r\n \r\n run('if [ [ -e %(previous_path)s ] && [ -e %(current_path)s ] ];then mv %(current_path)s %(next_path)s && mv %(previous_path)s %(current_path)s; fi' % env)\r\n\r\n stop_webserver()\r\n start_webserver()",
"def undo(self):\n\n self.revert_to_checkpoint()",
"def git_cleanup(repo='./'):\n info('Cleaning up git worktree (git reset --hard and git clean -fdx) ...')\n try:\n call_process(['git', '-C', repo, 'reset', '--hard'])\n call_process(['git', '-C', repo, 'clean', '-fdx'])\n except subprocess.CalledProcessError as e:\n warn(f'Failed to clean git worktree: {e}')",
"def git_reset(repo_path, commit=None, git_src=cwd):\n\n # use specified commit or HEAD\n commit = commit or git_head_rev(git_src)\n\n puts(green('Resetting to commit ') + commit)\n\n # reset the repository and working directory\n with cd(repo_path):\n run('git reset --hard %s' % commit)",
"def _reset_deleted_files(self):\n\n status = subprocess.check_output(['git', 'status'], cwd=self.repo_dir)\n deleted_files = self.DELETED_FILE_REGEX.findall(status.decode('utf-8'))\n\n for filename in deleted_files:\n yield from execute_cmd(['git', 'checkout', '--', filename], cwd=self.repo_dir)\n logging.info('Resetted {}'.format(filename))",
"def test_repo_delete_git_hook(self):\n pass",
"def cleanup(self) -> None:\n info('<<lightyellow>>Remove copied files... ', newline=False)\n shutil.rmtree(self.target)\n # restore pywikibot en.json file\n filename = 'en.json'\n self.target.mkdir()\n shutil.copy(self.source / filename, self.target / filename)\n info('<<lightyellow>>done')",
"def rollback():\n pass",
"def clear_scratch_dir(cls):\n for file in os.listdir(cls.SCRATCH_DIR):\n if file != '.gitkeep':\n os.remove(cls.get_path(file))",
"def rollback(self):\n\n # Delete the last entries in workingdb and working counts\n # Decrease transaction count\n if self.num_transactions > 1:\n\n for name in self.working_db:\n self.working_db[name].pop()\n\n for value in self.working_counts:\n self.working_counts[value].pop()\n\n self.num_transactions -= 1\n else:\n print \"NO TRANSACTION\"",
"def remove(self, files):\n self.checkout(self.INSTALLBRANCH)\n out = go(\"git rm -f %s\" % files)\n out += go(\"git commit -m 'figit: deleted %s'\" % files)\n self.checkout(self._init_branch)\n out += self.merge(\"figit: merging deletion of %s\" % files,\n self._init_branch, self.INSTALLBRANCH)\n return out",
"def cleanup_repos():\n\n with Session() as session:\n repos = session.query(SourceRepository).filter(SourceRepository.name.is_(None)).all()\n for repo in repos:\n try:\n repoinfo = giturlparse.parse(repo.url)\n except giturlparse.parser.ParserError:\n logger.warning(\"error parsing git url: {}\".format(repo.url))\n continue\n repo.name = repoinfo.name\n if repos:\n session.commit()\n\n repos = session.query(SourceRepository).filter(SourceRepository.state == \"busy\").all()\n for repo in repos:\n repo.state = \"ready\"\n if repos:\n session.commit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get commit object relating to version
|
def get_commit(self, version):
if version == 'latest':
return self.latest_commit
else:
return Commit(self, self._repo.commit(version))
|
[
"def get_commit(self, seq_no):\n\n return self.commits[seq_no]",
"def _version_by_commit_checksum(self, commit: str) -> Version:\n checksums = [version.checksum for version in self.versions]\n checksums.append(commit)\n checksums = sorted(checksums)\n candidate_checksum = checksums[checksums.index(commit) + 1]\n\n if not candidate_checksum.startswith(commit):\n raise IndexError(\"No such commit.\")\n\n for version in self.versions:\n if version.checksum == candidate_checksum:\n return version",
"def get_commit_from_hash(self, hash):\n return self._get_commit_from_git_show_with_object(hash)",
"def test_repo_get_single_commit_by_sha(self):\n pass",
"def test_repo_get_single_commit_by_ref(self):\n pass",
"def get_commit_hash():\n return git.Repo().head.object.hexsha",
"def commit_id(self):\n return self._get_build_details()[\"resolvedSourceVersion\"]",
"def commit(self):\n return self._repo.get_commit_for_branch(self)",
"def get_reference_commit(self, ref_name):\n ref = self.repo.lookup_reference('refs' + ref_name)\n return self.repo[ref.target]",
"def get_commit_date(commit):\n return commit['commit']['author']['date']",
"def get_commit(self, sha: str, **kwargs):\n if not self._commits.get(sha):\n from .commit import Commit\n\n self._commits[sha] = Commit(self, sha, **kwargs)\n return self._commits.get(sha)",
"def get_revision(path):\n proc = subprocess.Popen(['git', 'rev-parse', 'HEAD'], cwd=path, stdout=subprocess.PIPE)\n proc.wait()\n return proc.stdout.read().strip()",
"def get_commit(sha):\n commit = get_from_github(f'repos/streamlit/streamlit/commits/{sha}')\n return commit",
"def retrieve_git_info():\n # Is Git installed?\n try:\n subprocess.call(['git', '--version'],\n stdout=subprocess.PIPE)\n except OSError:\n return None\n\n # Decide whether this is a release\n p = subprocess.Popen(\n ['git', 'describe', '--tags', '--candidates=0', 'HEAD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n p.wait()\n if p.returncode == 0:\n tag = p.stdout.read()\n logger.debug('Most recent tag: ' + tag)\n if tag.startswith('tulip-'):\n return 'release'\n if len(tag) >= 2 and tag.startswith('v'):\n try:\n int(tag[1])\n return 'release'\n except ValueError:\n pass\n\n # Otherwise, return commit hash\n p = subprocess.Popen(\n ['git', 'log', '-1', '--format=%H'],\n stdout=subprocess.PIPE\n )\n p.wait()\n sha1 = p.stdout.read()\n logger.debug('SHA1: ' + sha1)\n return sha1",
"async def repo_version(self, **kwargs):\n endpoint = 'repo/version'\n args = []\n return await self.client.get_parsed(endpoint, args, kwargs)",
"def get_newest_commit(self, pr: int) -> str:\n commits = sorted(\n self.gh.get_commits_for_pr(pr),\n key=lambda c: f'{c.commit.committer.date}{c.commit.author.date}',\n reverse=True)\n return commits[0].sha if commits else ''",
"def commit(self) -> Optional[str]:\n return pulumi.get(self, \"commit\")",
"def get_commitlog(name, oldversion, newversion):\n REPODIR = \"/home/leaeasy/git-repo/\"\n # get all deepin repos\n try:\n allrepos = [f for f in os.listdir(\n REPODIR) if os.path.isdir(os.path.join(REPODIR, f))]\n except:\n # REPODIR not found\n return 9\n\n # not deepin packages\n if name not in allrepos:\n return 9\n\n # version example:\n # 3.0.1-1\n # 2:1.18.1-1\n # 10.1.0.5503~a20p2\n versionre = re.compile(\"(\\d:)?([\\d.]+).*\")\n oldtag = re.findall(versionre, oldversion)[0][1]\n newtag = re.findall(versionre, newversion)[0][1]\n\n commitcmd = \"cd \" + REPODIR + name + \" && git log --pretty=oneline --abbrev-commit \" + \\\n oldtag + \"..\" + newtag + \" && cd - >/dev/null\"\n\n commitlog = os.popen(commitcmd).read()\n\n if commitlog:\n return gen_commit_url(commitlog, name)\n else:\n return 9",
"def find_installed_version(self):\n\n output, err = self.run_git('rev-parse HEAD') \n\n if not output:\n return \"Error calling git\"\n\n cur_commit_hash = output.strip()\n\n if not re.match('^[a-z0-9]+$', cur_commit_hash):\n return self._git_error()\n \n self._cur_commit_hash = cur_commit_hash\n print \"Git hash is \"+cur_commit_hash\n \n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Path of COMBINE manifest file
|
def manifest_path(self):
return self.full_path(MANIFEST_FILENAME)
|
[
"def manifest_path(self):\n return os.path.join(self._dirname, 'build', 'manifest.txt')",
"def get_qc_manifest_path():\n manifest_path = \"//allen/programs/braintv/workgroups/nc-ophys/visual_behavior/2020_cache/production_cache/manifest.json\"\n return manifest_path",
"def conanfile(self):\n return os.path.join(self.conan(), CONANFILE)",
"def get_config_minimal() -> Path:\n return Path(__file__).parent.parent / \"config_minimal.yaml\"",
"def app_path(args):\r\n \r\n import os.path as p\r\n \r\n path, base = p.split(p.splitext(args.app.__file__)[0])\r\n if base == '__init__':\r\n print p.join(path, '')\r\n else:\r\n if p.splitext(args.app.__file__[-4:])[1] in ('.pyc', '.pyo'):\r\n print args.app.__file__[:-1]\r\n else:\r\n print args.app.__file__",
"def get_meta_conf_file_path():\n meta_dir = EdgeDefault.get_edge_ctl_config_dir()\n meta_conf_file = os.path.join(meta_dir, EdgeDefault._edge_meta_config_file)\n meta_conf_file = os.path.realpath(meta_conf_file)\n return meta_conf_file",
"def _get_manifest_path(repository, manifest=None):\n if manifest:\n return '/acr/v1/{}/_manifests/{}'.format(repository, manifest)\n return '/acr/v1/{}/_manifests'.format(repository)",
"def get_install_path():\n\n return os.path.dirname(__file__)",
"def get_install_scheme_path(self, path):",
"def permissions_file_path(tenant):\n config_path = os.environ.get('CONFIG_PATH', 'config')\n return safe_join(config_path, tenant, 'permissions.json')",
"def getProgramRegistryPath():\r\n return config.getConfig()[\"installed-programs.json\"]",
"def get_config_path():\n return os.path.join(\".deploy\", \"config\")",
"def resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = os.path.dirname(sys.argv[0])\r\n except Exception:\r\n return relative_path\r\n\r\n return os.path.join(base_path, relative_path)",
"def InstalledPaths(self):\n with open(self.manifest_file) as f:\n files = [line.rstrip() for line in f]\n return files",
"def get_path() -> str:\n config_dir: str = appdirs.user_config_dir(\"plotman\")\n return config_dir + \"/plotman.yaml\"",
"def deploy_path(self):\n return os.path.join(self.dotci3_path, 'deploy.yaml')",
"def cluster_ca_path() -> Path:\n return Path(__file__).parent / \"data\" / \"ssl\" / \"cluster.ca.crt\"",
"def _version_path(self) -> str:",
"def installed_skills_file_path(self):\n if self._installed_skills_file_path is None:\n virtual_env_path = os.path.dirname(os.path.dirname(sys.executable))\n if os.access(virtual_env_path, os.W_OK | os.R_OK | os.X_OK):\n self._installed_skills_file_path = os.path.join(\n virtual_env_path,\n '.mycroft-skills'\n )\n else:\n self._installed_skills_file_path = os.path.join(\n xdg.BaseDirectory.save_data_path('mycroft'),\n '.mycroft-skills')\n\n return self._installed_skills_file_path"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate COMBINE manifest file for repository index. Stages the manifest file for commit. Will overwrite existing manifest.
|
def generate_manifest(self, master_filename=None):
writer = ManifestWriter()
for entry in sorted(e for (e, _) in self._repo.index.entries):
writer.add_file(entry, is_master=entry == master_filename)
writer.write(self.manifest_path)
self.add_file(self.manifest_path)
|
[
"def write_manifest (self):\r\n self.execute(file_util.write_file,\r\n (self.manifest, self.filelist.files),\r\n \"writing manifest file '%s'\" % self.manifest)",
"def create_manifest(output_dir, pipeline_name, paths):\n if not os.path.isdir(output_dir):\n create_dir(output_dir)\n new_manifest_path = os.path.join(output_dir, pipeline_name + '.manifest')\n header = \"\"\"##fileformat=SOMATIC,1.0\nIN_PIPELINE\tOUT_PIPELINE\tBATCH_ID\tCID_ID\tNAE_ID\tSPECIMEN_ID\tSPECIMEN_TYPE\tP5_BARCODE\tP7_BARCODE\tRUN_FOLDER\tR1\tR2\tR3\tR4\tBAM\tVCF\"\"\"\n with open(new_manifest_path, 'w') as fileobj:\n fileobj.write(header + \"\\n\")\n for idx, fraction in enumerate(paths.keys()):\n somatic_fq1 = paths[fraction]['somatic'].get('fastq', '')[0]\n somatic_fq2 = paths[fraction]['somatic'].get('fastq', '')[1]\n somatic_bam = paths[fraction]['somatic'].get('bam', '')\n somatic_vcf = paths[fraction]['somatic'].get('vcf', '')\n if pipeline_name == 'onek':\n normal_fq1 = paths[fraction]['normal'].get('fastq', '')[0]\n normal_fq2 = paths[fraction]['normal'].get('fastq', '')[1]\n normal_bam = paths[fraction]['normal'].get('bam', '')\n normal_vcf = paths[fraction]['normal'].get('vcf', '')\n # make a copy of the normal and save it with its somatic pair\n line = [pipeline_name, '', str(fraction), 'cid_{}'.format(idx), 'nae_{}'.format(idx),\n 'specimen_{}'.format(fraction), 'normal', 'A{}'.format(idx), 'P{}'.format(idx), '',\n normal_fq1, normal_fq2, '', '',\n normal_bam, normal_vcf]\n fileobj.write(\"\\t\".join(line) + \"\\n\")\n line = [pipeline_name, '', str(fraction), 'cid_{}'.format(idx), 'nae_{}'.format(idx),\n 'specimen_{}'.format(fraction), 'tumor', 'A{}'.format(idx), 'P{}'.format(idx), '',\n somatic_fq1, somatic_fq2, '', '',\n somatic_bam, somatic_vcf]\n fileobj.write(\"\\t\".join(line) + \"\\n\")\n return new_manifest_path",
"def setup_assembly_file(self):\n\t\t# Add comment lines just to make the parser work harder\n\t\tcontent = '# Comment Line\\n' + json.dumps(self._assembly_desc, indent=4) + \\\n\t\t\t'\\n# Comment Line\\n'\n\t\tcreate_repo_with_files(os.path.join(self.tempfolder, 'gitrepos',\n\t\t\t\t'assemblies'), {'testassembly.katipo': {'content': content}})",
"def _create_project_manifest(self, napdr):\n # base structure\n pm = {\n \"descriptor_extension\": \"yml\",\n \"version\": \"0.5\",\n \"package\": {\n \"vendor\": napdr.vendor,\n \"name\": napdr.name,\n \"version\": napdr.version,\n \"maintainer\": napdr.maintainer,\n \"description\": napdr.description\n },\n \"files\": []\n }\n self.sources = []\n self.destinations = []\n # add entries for artifacts\n for pc in napdr.package_content:\n tmp = pc.copy()\n # remove checksum information\n del tmp[\"algorithm\"]\n del tmp[\"hash\"]\n # re-write path (source -> path)\n tmp[\"path\"] = os.path.join(self.remove_Definitions(tmp[\"source\"]))\n self.sources.append(tmp[\"source\"])\n self.destinations.append(tmp[\"path\"])\n del tmp[\"source\"]\n # re-write content type (content-type -> type)\n tmp[\"type\"] = tmp[\"content-type\"]\n del tmp[\"content-type\"]\n # add to pm\n pm.get(\"files\").append(tmp)\n return pm",
"def _generate_and_add_master_file_to_orchestrator_folder(folder):\n with open(os.path.join(folder, ORCH_MAIN_FILE), \"w\") as f:\n f.write(ORCH_STR_FILE)",
"def bundle_coverage(opts):\n info = firmware_pb2.FirmwareArtifactInfo()\n info.bcs_version_info.version_string = opts.bcs_version\n bundle_dir = get_bundle_dir(opts)\n zephyr_dir = pathlib.Path(__file__).parent\n platform_ec = zephyr_dir.resolve().parent\n build_dir = platform_ec / 'build/zephyr-coverage'\n tarball_name = 'coverage.tbz2'\n tarball_path = bundle_dir / tarball_name\n cmd = ['tar', 'cvfj', tarball_path, 'lcov.info']\n subprocess.run(cmd, cwd=build_dir, check=True)\n meta = info.objects.add()\n meta.file_name = tarball_name\n meta.lcov_info.type = firmware_pb2.FirmwareArtifactInfo.LcovTarballInfo.LcovType.LCOV\n\n write_metadata(opts, info)",
"def create_readme(histfile, vb):\r\n\tme = \"LE_Utils.create_readme: \"\r\n\treadmefile = os.path.dirname(histfile)+\"/README.txt\"\r\n\ttry:\r\n\t\tassert os.path.isfile(readmefile)\r\n\texcept AssertionError:\r\n\t\tnow = str(datetime.now().strftime(\"%Y-%m-%d %H.%M\"))\r\n\t\tcommit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])\r\n\t\theader = \"Time:\\t\"+now+\"\\nCommit hash:\\t\"+commit+\"\\n\\n\"\r\n\t\twith open(readmefile,\"w\") as f:\r\n\t\t\tf.write(header)\r\n\t\tif vb: print me+\"Created readme file \"+readmefile\r\n\treturn",
"def bundle_coverage(opts):\n info = firmware_pb2.FirmwareArtifactInfo()\n info.bcs_version_info.version_string = opts.bcs_version\n bundle_dir = get_bundle_dir(opts)\n ec_dir = os.path.dirname(__file__)\n tarball_name = 'coverage.tbz2'\n tarball_path = os.path.join(bundle_dir, tarball_name)\n cmd = ['tar', 'cvfj', tarball_path, 'lcov.info']\n subprocess.run(cmd, cwd=os.path.join(ec_dir, 'build/coverage'), check=True)\n meta = info.objects.add()\n meta.file_name = tarball_name\n meta.lcov_info.type = (\n firmware_pb2.FirmwareArtifactInfo.LcovTarballInfo.LcovType.LCOV)\n\n write_metadata(opts, info)",
"def createCbz(self,name,src,dst = '.',page_list = []):\n \n os.chdir(src)\n zip_name = ''.join((name,'.zip'))\n if page_list:\n pages = page_list\n else: \n pages = glob.glob('*jpg')\n pages.sort(key = self.natural_key)\n with zipfile.ZipFile(zip_name,mode = 'w') as page:\n for i in pages:\n page.write(i)\n os.remove(i)\n new_name = ''.join((os.path.splitext(zip_name)[0],'.cbz'))\n os.rename(zip_name,new_name)",
"def _GenerateManifest(self):\n manifest = { FILES_KEY: {}, PROGRAM_KEY: {} }\n\n needed = self.GetNeeded()\n\n extra_files_kv = [(key, ArchFile(name=key,\n arch=arch,\n path=url,\n url=url))\n for key, arch, url in self.extra_files]\n\n manifest_items = list(needed.items()) + extra_files_kv\n\n # Add dynamic loader to the program section.\n for need, archinfo in manifest_items:\n if IsLoader(need):\n urlinfo = { URL_KEY: archinfo.url }\n manifest[PROGRAM_KEY][archinfo.arch] = urlinfo\n\n for need, archinfo in manifest_items:\n urlinfo = { URL_KEY: archinfo.url }\n name = archinfo.name\n arch = archinfo.arch\n\n if IsLoader(need):\n continue\n\n if need in self.main_files:\n if need.endswith(\".nexe\"):\n # Place it under program if we aren't using the runnable-ld.so.\n program = manifest[PROGRAM_KEY]\n if arch not in program:\n program[arch] = urlinfo\n continue\n # Otherwise, treat it like another another file named main.nexe.\n name = MAIN_NEXE\n\n name = self.remap.get(name, name)\n fileinfo = manifest[FILES_KEY].get(name, {})\n fileinfo[arch] = urlinfo\n manifest[FILES_KEY][name] = fileinfo\n self.manifest = manifest",
"def generateManifest(syn, allFiles, filename, provenance_cache=None):\n keys, data = _extract_file_entity_metadata(\n syn, allFiles, provenance_cache=provenance_cache\n )\n _write_manifest_data(filename, keys, data)",
"def generate_sync_manifest(syn, directory_path, parent_id, manifest_path):\n manifest_cols = [\"path\", \"parent\"]\n manifest_rows = _walk_directory_tree(syn, directory_path, parent_id)\n _write_manifest_data(manifest_path, manifest_cols, manifest_rows)",
"def deploy():\n tmpdir = tempfile.mkdtemp()\n shutil.copytree('output', os.path.join(tmpdir, 'output'))\n\n cmd = 'git checkout gh-pages'\n logging.info('EXEC: {}'.format(cmd))\n subprocess.call(cmd, shell=True)\n\n try:\n for root, dirnames, filenames in os.walk(os.path.join(tmpdir, 'output')):\n for filename in filenames:\n if filename.endswith('.png'): # for now\n continue\n filename = os.path.relpath(os.path.join(root, filename), os.path.join(tmpdir, 'output'))\n shutil.copy(os.path.join(tmpdir, 'output', filename), filename)\n cmd = 'git add ' + filename\n logging.info('EXEC: {}'.format(cmd))\n subprocess.call(cmd, shell=True)\n\n cmd = 'git commit -m \"Latest build\"'\n logging.info('EXEC: {}'.format(cmd))\n subprocess.call(cmd, shell=True)\n\n finally:\n cmd = 'git checkout master'\n logging.info('EXEC: {}'.format(cmd))\n subprocess.call(cmd, shell=True)",
"def generate_readme(self):\n\n if os.path.exists(self.install_config.install_location + \"/INSTALL_README.txt\"):\n os.remove(self.install_config.install_location + \"/INSTALL_README.txt\")\n readme_fp = open(self.install_config.install_location + \"/INSTALL_README.txt\", \"w+\")\n readme_fp.write(\"Autogenerated installSynApps README file created on {}\\n\".format(datetime.datetime.now()))\n readme_fp.write(\"https://github.com/epicsNSLS2-deploy/installSynApps\\n\")\n readme_fp.write(\"-------------------------------------------------------\\n\")\n readme_fp.write(\"The following modules were installed with the following version numbers:\\n\\n\")\n for module in self.install_config.get_module_list():\n if module.build == \"YES\":\n readme_fp.write(\"{} -> {}\\n\".format(module.name, module.version))\n \n readme_fp.write(\"-------------------------------------------------------\\n\")\n readme_fp.write(\"The following modules were cloned with the given versions but not auto-built\\n\\n\")\n \n for module in self.install_config.get_module_list():\n if module.build == \"NO\" and module.clone == \"YES\":\n readme_fp.write(\"{} -> {}\\n\".format(module.name, module.version))\n \n readme_fp.close()",
"def _write_build_script(self, dir, manifest):\n buildfile = Buildfile()\n for step in manifest.build:\n buildfile.cmd(step)\n with open(os.path.join(dir, 'build.sh'), 'w') as fp:\n fp.write(buildfile.build())",
"def mbl_manifest_repo_push(self):\n repo = self.external_repo_name_to_cloned_repo[\n mnf.MBL_MANIFEST_REPO_NAME\n ]\n repo.handle.git.add(update=True)\n repo.handle.index.commit(\"release manager automatic commit\")\n\n if self.diagnostic_repo_push(repo):\n self.repo_push(repo, repo.handle.active_branch)\n self.summary_logs.append(\n SUMMARY_H_PUSH + \"Pushed from repository clone path={} a new \"\n \"branch={} to remote url={},\"\n \"\\nNew commit hash={}\".format(\n repo.clone_dest_path,\n repo.handle.active_branch.name,\n repo.url,\n repo.handle.active_branch.commit.hexsha,\n )\n )\n else:\n self.logger.info(\"Skip pushing...\")",
"def _write_component_manifest(\n manifest_entries, component_info, manifest_path, out_dir):\n\n for component_manifest in _get_component_manifests(component_info):\n manifest_source = component_manifest.get('source')\n manifest_basename = os.path.basename(manifest_source)\n if 'output_name' in component_manifest:\n _, extension = os.path.splitext(manifest_basename)\n manifest_basename = component_manifest.get('output_name') + \\\n extension\n\n manifest_dest_file_path = os.path.join(\n os.path.dirname(manifest_path), manifest_basename)\n shutil.copy(manifest_source, manifest_dest_file_path)\n\n manifest_entry_key = os.path.join('meta', manifest_basename)\n manifest_entries[manifest_entry_key] = os.path.relpath(\n manifest_dest_file_path, out_dir)\n return manifest_dest_file_path",
"def main():\n\n parser = argparse.ArgumentParser(\n description='Perform initial loading of build database from manifests'\n )\n parser.add_argument('-c', '--config', dest='add_proj_config',\n help='Configuration file for build database loader',\n default='build_db_loader_conf.ini')\n\n args = parser.parse_args()\n\n # Check configuration file information\n add_proj_config = configparser.ConfigParser()\n add_proj_config.read(args.add_proj_config)\n\n if any(key not in add_proj_config for key in ['build_db', 'repos']):\n print(\n f'Invalid or unable to read config file {args.add_proj_config}'\n )\n sys.exit(1)\n\n db_info = add_proj_config['build_db']\n db_required_keys = ['db_uri', 'username', 'password']\n\n if any(key not in db_info for key in db_required_keys):\n print(\n f'One of the following DB keys is missing in the config file:\\n'\n f' {\", \".join(db_required_keys)}'\n )\n sys.exit(1)\n\n repo_info = add_proj_config['repos']\n repo_required_keys = ['manifest_dir', 'manifest_url', 'repo_basedir']\n\n if any(key not in repo_info for key in repo_required_keys):\n print(\n f'One of the following repo keys is missing in the '\n f'config file:\\n {\", \".join(repo_required_keys)}'\n )\n sys.exit(1)\n\n # Now run through all the manifests in build-manifests and update\n # the database with new project documents\n add_projects = AddProject(db_info, repo_info)\n last_manifest = [] # Start from beginning\n manifest_repo = repo_info['manifest_dir']\n\n print('Checking out/updating the build-manifests repo...')\n cbutil_git.checkout_repo(manifest_repo, repo_info['manifest_url'])\n\n manifest_walker = cbutil_git.ManifestWalker(manifest_repo, last_manifest)\n\n for commit_info, manifest_xml in manifest_walker.walk():\n try:\n manifest_info = add_projects.get_manifest_info(manifest_xml)\n except mf_parse.InvalidManifest as exc:\n # If the file is not an XML file, simply move to next one\n print(f'{commit_info[0]}: {exc}, skipping...')\n continue\n\n add_projects.update_project_documents(manifest_info)",
"def CommitManifest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
All filenames in this commit
|
def filenames(self):
return {blob.name for blob in self._commit.tree.blobs} | self.ephemeral_file_names
|
[
"def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result = result.split()\n if result[4] in ['A', 'M']:\n files.append(result[5])\n\n return files",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def FilenamesFromGit(branch_name, extensions):\n lines = GitShell('git diff --stat=600,500 %s' % branch_name)\n filenames = []\n for line in lines:\n line = line.lstrip()\n # Avoid summary line, and files that have been deleted (no plus).\n if line.find('|') != -1 and line.find('+') != -1:\n filename = line.split()[0]\n if filename:\n filename = filename.rstrip()\n ext = filename.rsplit('.')[-1]\n if not extensions or ext in extensions:\n filenames.append(filename)\n return filenames",
"def all_paths():\n repo_root = os.path.abspath(os.path.join(INFRABOTS_DIR, os.pardir, os.pardir))\n output = subprocess.check_output(['git', 'ls-files'], cwd=repo_root).rstrip()\n return output.splitlines()",
"def list_ephemeral_files(self):\n cmd = self._repo._repo.git\n try:\n names_note = cmd.notes('--ref', self.FILE_LIST_REF, 'show', self.sha)\n return {n for n in names_note.split('\\n') if n}\n except GitCommandError:\n return set()",
"def _StringifyFilenames(self):\n # Prefix each filename with a hyphen so that Gerrit will format it as an\n # unordered list.\n return '\\n\\n'.join('- %s' % x for x in self.files)",
"def get_changed_files_in_commit(self, commit_hash):\r\n output = self._execute_command(get_changed_files_in_commit.format(commit_id=commit_hash))\r\n return re.match(r\"(?P<content>.*)\\ncommit {}\".format(commit_hash), output, re.DOTALL).group('content').splitlines()",
"def files(self):\n return chain(self._commit.tree.blobs, self.ephemeral_files)",
"def _getFileNames(request):\n return request[\"filenames\"][0].split(\",\")",
"def files(self):\n return map(os.path.basename,template.files_from_results(self.results))",
"def get_file_name(repo_path):\n \n files = []\n \n s = subprocess.check_output(\"cd %s; ls \" % repo_path, shell=True)\n r = re.compile(\"((.*))\\n\")\n matches = r.findall(s)\n for m in matches:\n files.append(dict(file_name=m[0].strip()))\n\n\n return files",
"def list_modified_files(self):\n return gitinfo.list_staged_files(gitinfo.current_commit())",
"def raw_files(self, files):\n return [os.path.basename(f) for f in files if \"_raw\" in f]",
"def _collect_file_names(self, out_dir: str) -> List[str]:\n file_names = sp.Popen(['ls', out_dir], stdout=sp.PIPE)\n\n downloaded_files = [\"{}/{}\".format(out_dir, str(name, 'utf-8'))[:-1]\n for name in file_names.stdout.readlines()]\n self._log_minion.debug('Downloaded Files: {}'.format(downloaded_files))\n return downloaded_files",
"def files(self):\n files = File.objects.filter(hash=self)\n return files",
"def get_modified_files(commit, file_extension):\n current_modified_files = [mod_file for mod_file in commit.modifications\n if mod_file.filename.endswith(file_extension)]\n return current_modified_files",
"def get_names_of_src_files(self):\r\n assert self.__is_valid, \"No valid run path: \" + self.__run_path\r\n return self.__names_of_src_files",
"def get_all_files_from_commit(cid, repo, filter_config, verbose=False):\n c = repo.commit(cid)\n git_tree = c.tree\n raw_files = get_all_files_from_tree(git_tree)\n if verbose:\n print 'Total raw files:', len(raw_files)\n subset_files = [f for f in raw_files\n if filter_file(f, filter_config)]\n if verbose:\n print 'Total selected files:', len(subset_files)\n return subset_files",
"def get_all_files_in_current_revision(self, input_path):\n input_files = []\n for root, dirs, files in os.walk(input_path, topdown=True):\n for name in files:\n full_file_path = os.path.join(root, name).replace(input_path, \"\")\n\n # Filter out git directory\n if not full_file_path.startswith(\"/.git/\"):\n input_files.append(full_file_path.lstrip('/'))\n\n return input_files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
All files in this commit
|
def files(self):
return chain(self._commit.tree.blobs, self.ephemeral_files)
|
[
"def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result = result.split()\n if result[4] in ['A', 'M']:\n files.append(result[5])\n\n return files",
"def get_all_files_from_commit(cid, repo, filter_config, verbose=False):\n c = repo.commit(cid)\n git_tree = c.tree\n raw_files = get_all_files_from_tree(git_tree)\n if verbose:\n print 'Total raw files:', len(raw_files)\n subset_files = [f for f in raw_files\n if filter_file(f, filter_config)]\n if verbose:\n print 'Total selected files:', len(subset_files)\n return subset_files",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def all_paths():\n repo_root = os.path.abspath(os.path.join(INFRABOTS_DIR, os.pardir, os.pardir))\n output = subprocess.check_output(['git', 'ls-files'], cwd=repo_root).rstrip()\n return output.splitlines()",
"def get_changed_files_in_commit(self, commit_hash):\r\n output = self._execute_command(get_changed_files_in_commit.format(commit_id=commit_hash))\r\n return re.match(r\"(?P<content>.*)\\ncommit {}\".format(commit_hash), output, re.DOTALL).group('content').splitlines()",
"def list_modified_files(self):\n return gitinfo.list_staged_files(gitinfo.current_commit())",
"def __get_all_files(self):\n\n ret_files = []\n for root, dirs, files in os.walk(self.target):\n for filename in files:\n ret_files.append(os.path.join(root, filename))\n return ret_files",
"def process_commit_files_unfiltered(c, verbose=False):\n\n files = []\n # for p in c.parents: # iterate through each parent\n if len(c.parents) > 0:\n p = c.parents[0]\n for d in c.diff(p, create_patch=False):\n if False: # verbose:\n print\n print 'A:', d.a_blob,\n if isValidBlob(d.a_blob):\n print d.a_blob.path\n print 'B:', d.b_blob,\n if isValidBlob(d.b_blob):\n print d.b_blob.path\n sys.stdout.flush()\n if not isValidBlob(d.a_blob):\n if verbose:\n print 'Delete'\n continue\n elif not isValidBlob(d.b_blob):\n if verbose:\n print 'Add A'\n files.append(d.a_blob.path)\n elif (isValidBlob(d.a_blob) and isValidBlob(d.b_blob)\n and d.b_blob.path.endswith('.py')):\n files.append(d.b_blob.path)\n elif len(c.parents) == 0:\n # inaugural commit, so can't use diff\n files = [b for b in get_all_blob_paths(c.tree)]\n return files",
"def filenames(self):\n return {blob.name for blob in self._commit.tree.blobs} | self.ephemeral_file_names",
"def files(self):\n files = File.objects.filter(hash=self)\n return files",
"def get_modified_files(commit, file_extension):\n current_modified_files = [mod_file for mod_file in commit.modifications\n if mod_file.filename.endswith(file_extension)]\n return current_modified_files",
"def get_changed_files(self):",
"def files_touched(self, commit):\n if commit.parents:\n par_list = commit.parents\n else:\n par_list = [empty_tree_oid()]\n new_oid_set = set()\n for p in par_list:\n diff = self._repo.diff(p, commit)\n for dd in diff.deltas:\n new_oid_set.add((dd.new_file.path, dd.new_file.id))\n return new_oid_set",
"def get_all_files_in_current_revision(self, input_path):\n input_files = []\n for root, dirs, files in os.walk(input_path, topdown=True):\n for name in files:\n full_file_path = os.path.join(root, name).replace(input_path, \"\")\n\n # Filter out git directory\n if not full_file_path.startswith(\"/.git/\"):\n input_files.append(full_file_path.lstrip('/'))\n\n return input_files",
"def returnFiles(self):\n return self.files",
"def read_all_raw_files():\n pass",
"def files(self):\n return [File(self, p) for p in fileList(self.paths['build'], relative=True)]",
"def _get_changed_files(base_branch):\n # Get file changes between branch and merge-base of specified branch\n base_commit = check_output([\"git\", \"merge-base\", base_branch, \"HEAD\"]).rstrip()\n return check_output([\"git\", \"diff\", base_commit, \"--name-only\"]).splitlines()",
"def f_files(self):\n return self._f_files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SHA of the commit
|
def sha(self):
return self._commit.hexsha
|
[
"def get_commit_hash():\n return git.Repo().head.object.hexsha",
"def get_git_commit_sha():\n\n return os.getenv(\"GIT_COMMIT\")",
"def get_head_sha(self, path): # type: (str) -> str\n command = [\n 'git',\n 'rev-parse',\n 'HEAD',\n ]\n abspath = os.path.abspath(path)\n self.logger.debug('Using dir ' + abspath)\n self.logger.debug('Executing ' + ' '.join(command))\n output = subprocess.check_output(command, cwd=abspath)\n sha = output.decode('UTF-8').strip()\n assert len(sha) == 40, 'Invalid SHA: \"%s\"' % sha\n return sha",
"def repo_get_sha(self):\n raise NotImplementedError('Method repo_get_sha not implemented in root(Git*Connect) class')",
"def latest_hash() -> str:\n ret = subprocess.run([\"git\", \"rev-parse\", \"HEAD\"], capture_output=True, check=True)\n assert ret.returncode == 0, \"Failed to get latest commit hash.\"\n commit_hash = ret.stdout.decode(\"utf-8\").strip()\n return commit_hash",
"def sha(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sha\")",
"def get_last_hash(self):\n last_commit_hash = subprocess.check_output(\n ['git', 'rev-parse', 'HEAD'],\n universal_newlines=True, cwd=self._destination\n )\n return last_commit_hash.strip()",
"def get_repo_git_commit_hash(repo_path):\n import subprocess\n\n githash = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd = repo_path).strip()\n # Typecast to fix python3 TypeError (Object of type bytes is not JSON serializable)\n # subprocess.check_output() returns a byte string\n githash = str(githash)\n\n return githash",
"def get_commit_sha(self, project, branch):\n\n try:\n commit_sha = subprocess.check_output(\n [self.repo_bin, 'forall', project, '-c',\n f'git show-ref --hash $REPO_REMOTE/{branch}'],\n cwd=self.product_dir, stderr=subprocess.STDOUT\n ).decode()\n except subprocess.CalledProcessError as exc:\n print(f'The \"repo forall\" command failed: {exc.output}')\n sys.exit(1)\n\n return commit_sha.strip()",
"def get_commit(sha):\n commit = get_from_github(f'repos/streamlit/streamlit/commits/{sha}')\n return commit",
"def get_head_sha():\n from sh import git, ErrorReturnCode\n try:\n result = git('rev-parse', 'HEAD')\n except ErrorReturnCode as error:\n raise Ci3Error(\"Failed to get SHA1 of the local git HEAD: %s\" % error)\n return result.strip()",
"def test_repo_get_single_commit_by_sha(self):\n pass",
"def commit(self):\n return self._repo.get_commit_for_branch(self)",
"def pkg_commit_hash(pkg_path):\n\n # maybe we are in a repository, check for a .git folder\n p = os.path\n cur_path = None\n par_path = pkg_path\n while cur_path != par_path:\n cur_path = par_path\n if p.exists(p.join(cur_path, '.git')):\n try:\n proc = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=pkg_path)\n repo_commit, _ = proc.communicate()\n except OSError:\n repo_commit = None\n\n if repo_commit:\n return 'repository', repo_commit.strip().decode('ascii')\n else:\n return u'', u''\n par_path = p.dirname(par_path)\n \n return u'', u''",
"def revision_hash(self):\n return self._to_str(self.h5py_file.attrs[\"revision_hash\"])",
"def entry_sha1(entry):\n if entry.kind == 'symlink':\n return osutils.sha_string(entry.symlink_target)\n else:\n return entry.text_sha1",
"def commit_id(self):\n return self._get_build_details()[\"resolvedSourceVersion\"]",
"def get_commit_from_hash(self, hash):\n return self._get_commit_from_git_show_with_object(hash)",
"def hexdigest(self, msg):\n\t\tcommitment = SHA256.new()\n\t\tcommitment.update(msg.encode())\n\t\treturn commitment.hexdigest()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Author of the commit
|
def author(self):
return self._commit.author
|
[
"def get_author(self, commit_hash):\n\n command = []\n command.append(COMMAND_GIT)\n command.append(OPTION_SHOW)\n command.append(\"-s\")\n command.append(\"--format=%cE\")\n command.append(commit_hash)\n\n std_out, std_err = self._run(command)\n\n author_email = std_out.strip()\n\n return author_email",
"def extract_username(self,commit):\n if commit['author']:\n return commit['author']['login']\n return \"\"",
"def author_name(self) -> str:",
"def get_author(self):\n\t\treturn self._author",
"def get_author(self):\n return self.user.first_name +\" \"+ self.user.last_name",
"def book_author(self) -> str:\n return self._book_author",
"def get_author_name(obj):\n return obj.author.username",
"def author(self) -> SAuthor:\n return self._raw_author",
"def get_line_author_name(self, commit_hash, file_path, line):\r\n line = self._execute_command(git_blame_command_name.format(commit_id=commit_hash, file_path=file_path, line=line))\r\n return re.match(r\".* \\((?P<name>.*) \\d{4}-\\d{2}-\\d{2} .*\", line.strip()).group('name')",
"def get_author():\n\tif 'author' not in color_dict or 'name' not in color_dict['author']:\n\t\traise NameError('Author field not exists or empty')\n\n\tif 'email' not in color_dict['author'] or not color_dict['author']['email']:\n\t\treturn color_dict['author']['name']\n\n\treturn '{} <{}>'.format(color_dict['author']['name'], color_dict['author']['email'])",
"def author_info(self):\n return User.objects.get(pk=self.author)",
"def author_email():\n if \"GIT_AUTHOR_EMAil\" in os.environ:\n return os.environ[\"GIT_AUTHOR_EMAIL\"].strip()\n return git.config(\"--get\", \"user.email\").strip()",
"def get_author(self):\n return self.get_abstract_item(\"General\", \"Author\")",
"def contributor(self) -> Optional[str]:\n return self.author",
"def author_info(self):\n return User.objects.get(pk=self.user_id)",
"def get_commit_date(commit):\n return commit['commit']['author']['date']",
"def quote_author(self):\n return f'- {self._quote_author}'",
"def author(self) -> Dict[str, str]:\n\n # No display names in DMs\n if isinstance(self.dest, DMChannel):\n name = self.bot.user.name\n else:\n name = self.ctx.guild.me.display_name\n\n author = {\n 'name': f'{name} Help Manual',\n 'icon_url': self.avatar\n }\n return author",
"def get_line_author_email(self, commit_hash, file_path, line):\r\n line = self._execute_command(git_blame_command_email.format(commit_id=commit_hash, file_path=file_path, line=line))\r\n return re.match(r\".* \\(<(?P<email>.*@.*)>.*\", line.strip()).group('email')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Datetime representation of commit timestamp
|
def timestamp(self):
return datetime.fromtimestamp(self._commit.committed_date).replace(tzinfo=utc)
|
[
"def timestamp_to_str(commit_timestamp) -> str:\n date = datetime.fromtimestamp(float(str(commit_timestamp)[:-3]))\n d = date.strftime(\"%A, %B %d, %Y %H:%M\")\n return str(commit_timestamp) + \" (\" + d + \")\"",
"def get_commit_date(commit):\n return commit['commit']['author']['date']",
"def get_release_date() -> datetime.datetime:\n dt_str, _ = run_cmd([\"git\", \"log\", \"-1\", \"--format=%cI\", args.tag])\n return datetime.datetime.fromisoformat(dt_str.strip())",
"def timestamp(self) -> Decimal:\n return self.__dict__[\"timestamp\"]",
"def timestamp() -> str:\n return datetime.datetime.now().astimezone().replace(microsecond=0).isoformat()",
"def git_date(d):\n return datetime.datetime.strptime(d, '%Y-%m-%dT%H:%M:%SZ')",
"def timestamp_as_string(self):\n return (\n f\"{self.timestamp.year}-{self.timestamp.month}-\"\n f\"{self.timestamp.day}-{self.timestamp.hour}-\"\n f\"{self.timestamp.minute}-{self.timestamp.second}\"\n )",
"def last_updated(path):\n # %at is author time as unix timestamp\n git_cmd = \"git log --format=%at -- {}\".format(path).split()\n stdout = subprocess.run(git_cmd, capture_output=True).stdout\n ts = stdout.split(b\"\\n\")[0]\n dt = datetime.datetime.fromtimestamp(int(ts)).date().isoformat()\n return dt",
"def timestamp_for(self, path):\n if path.samefile(self.workdir):\n git_cmd = [\n 'git', 'log', '-1', '--format=format:%ct',\n ]\n else:\n git_cmd = [\n 'git', 'log', '-1', '--format=format:%ct', '--', path.strpath,\n ]\n\n # Get the timestamp\n return int(subprocess.check_output(\n git_cmd,\n stderr=subprocess.STDOUT,\n cwd=self.workdir.strpath,\n ))",
"def get_last_commit_time() -> str:\n return _get_last_commit_with_format('%cd')",
"def timestamp(self) -> datetime:\n return self.context['embryo'].get('timestamp')",
"def dbTimestampPythonNow():\n return dbTimestampToZuluDatetime(datetime.datetime.utcnow());",
"def databaseCloneTime(self):\n global databaseCloneTime\n import time\n Format = \"%Y-%m-%d\"\n ktime = time.mktime(time.strptime(databaseCloneTime, Format))\n return ktime",
"def get_git_changeset():\n repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=repo_dir,\n universal_newlines=True)\n\n timestamp = git_log.communicate()[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError: # pragma: nocover\n return None # pragma: nocover\n return timestamp.strftime('%Y%m%d%H%M%S')",
"def current_timestamp():\n # return \"%d-%02d-%02dT%02d:%02d:%02dZ\" % utime.localtime()[:6]\n return utime.time()",
"def _get_timestamp(self):\n return datetime.datetime.now()",
"def getFixDate(self):\r\n sort_date_stamp = []\r\n for commit_id in self.getFixCommits():\r\n cmd = [\"git\",\"log\",\"--pretty=format:\\\"%ad\\\"\",\"-1\", commit_id]\r\n p = Popen(cmd, cwd=self.repo, stdout=PIPE)\r\n date, res = p.communicate()\r\n date = unicodedata.normalize(u'NFKD', date.decode(encoding=\"utf-8\", errors=\"ignore\"))\r\n dateStamp = time.mktime(time.strptime(date[:-7].strip('\"'),\"%a %b %d %H:%M:%S %Y\")) # trun the date into timestamp\r\n sort_date_stamp.append(dateStamp)\r\n sort_date_stamp.sort()\r\n return sort_date_stamp",
"def get_date_of_last_update(self):\n # We obtain this information by checking the last modification time of\n # the .git/FETCH_HEAD file. This is not bulletproof (see the comments\n # in http://stackoverflow.com/a/9229377), but I don't know of any\n # better way.\n return datetime.datetime.fromtimestamp(\n os.path.getmtime(os.path.join(self.path, '.git', 'FETCH_HEAD'))\n )",
"def iso_timestamp():\n return datetime.now().isoformat() + 'Z'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get name of master file on this commit, as defined by COMBINE manifest
|
def master_filename(self):
reader = ManifestReader()
for file_ in self.files:
if file_.name == MANIFEST_FILENAME:
reader.read(file_.data_stream)
return reader.master_filename
|
[
"def cctFileName(self):\n p = os.path.basename(self.cctFilePath())\n return p",
"def name(self):\n # `git rev-parse --show-toplevel` prints the path to the top-level\n # directory of the repository.\n return os.path.basename(\n self.run_git_cmd(['rev-parse', '--show-toplevel']).strip()\n )",
"def filename(self):\n return f\"{self.sha}{self.extension}\"",
"def git_name(self):\n\n if self._git_name is False:\n path = os.path.expanduser('~/.gitconfig')\n if os.path.isfile(path):\n with open(path, 'r') as open_f:\n contents = open_f.read()\n match = re.search(r'name = (.+)$', contents, re.MULTILINE)\n if match:\n self._git_name = match.group(1).strip()\n else:\n self._git_name = None\n else:\n self._git_name = None\n return self._git_name",
"def cctFileName(self):\n return os.path.basename(self.cctFilePath())",
"def extract_master_commit(commits):\n master_cid = False\n for k, c in commits.items():\n if 'is_master_commit' in c and c['is_master_commit']:\n master_cid = k\n break\n assert(master_cid)\n return master_cid",
"def get_current_branch_name() -> str:\n return exec_cmd(\"git rev-parse --abbrev-ref HEAD\").strip()",
"def current_name():\n return os.path.basename(os.getcwd())",
"def workflow_filename():\n stacks = inspect.stack()\n frame = inspect.stack()[len(stacks) - 1]\n full_path = frame[0].f_code.co_filename\n filename, _ = os.path.splitext(os.path.basename(full_path))\n filename = argo_safe_name(filename)\n return filename",
"def get_token_name(self):\n base = os.path.basename(self.file_path)\n return os.path.splitext(base)[0]",
"def base_branch_name(self) -> str:\n return self.data['base']['ref']",
"def head_branch_name(self) -> str:\n return self.data['head']['ref']",
"def get_commit_message(self):\n fullName = self.get_object_string(self.request_data, \"fullName\")\n name = self.get_object_string(self.request_data, \"name\")\n\n commitmessage = fullName.replace(name + \": \", \"\")\n\n self.logger.debug('get_commit_message: %s' % commitmessage)\n return commitmessage",
"def file_name(self):\n ret = self._get_attr(\"fileName\")\n return ret",
"def get_brief_file_name(connect):\n logging.warn(\"try to get brief file from server: %s\" % connect.ip)\n get_slave_brief_file_name_result = connect.execute_cmd(r\"ls -t result/*_brief.txt | head -1\")\n tmp = get_slave_brief_file_name_result.split('\\r\\n')[0]\n\n return tmp.split('/')[1]",
"def obb_file_name(self) -> str:\n return pulumi.get(self, \"obb_file_name\")",
"def file_name(self) -> str:\n return self._occurrence_data.get('fileName') # type: ignore",
"def getBaseName(self):\n a = self._attr.split(\".\")\n b = a[1].split(\"[\")\n return b[0]",
"def get_filename(self):\r\n self.filename = self.history_lines[0].split('=')[1].rstrip()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a git note to this commit
|
def add_note(self, note):
cmd = self._repo._repo.git
cmd.notes('--ref', self.NOTE_REF, 'add', '-f', '-m', note, self.sha)
|
[
"def write_note(data, commit='HEAD'):\n command = 'git notes --ref=%s add ' % NOTES_REF\n for k, v in data.iteritems():\n command = '%s -m \"%s: %s\"' % (command, k, v)\n run_command(command)",
"def add_log_note(self, note):",
"def add_note(self):\n note_id = __notes__.new_note()\n self.set_note_id(note_id)",
"def add_note(self,note):\n q=\"insert into note(msg) values('%s')\"%(note.get_msg())\n try:\n NoteDB.cursor.execute(q)\n NoteDB.db.commit()\n except Exception as e:\n print(e)\n NoteDB.db.rollback()\n raise",
"def add_note(self, token, task_id, note_content):\n params = {\n 'token': token,\n 'item_id': task_id,\n 'content': note_content\n }\n return self._post('addNote', params)",
"def make_ticket_comment(self, repos, changeset):\n revstring = str(changeset.rev)\n if repos.reponame:\n revstring += '/' + repos.reponame\n return \"\"\"\\\nIn [%s]:\n{{{\n#!CommitTicketReference repository=\"%s\" revision=\"%s\"\n%s\n}}}\"\"\" % (revstring, repos.reponame, changeset.rev, changeset.message.strip())",
"def addLog(self, comment, path='.'):\n if not path.endswith('/'): path += '/'\n path = getAddonPath(path) + 'docs/CHANGES.rst'\n if not fileExists(path): addFile(path)\n insertAfterNthLine(path, '- ' + comment + '. [' + os.getenv('USER') + ']\\n', 6)\n os.system('git commit -am \"' + comment + '\"')",
"def _add_note_entry(self):\n note = self.faker.sentence()\n instance = models.Note.objects.create(child=self.child, note=note)\n instance.save()\n self._add_tags(instance)",
"def note(self, msg, raw=False):\n self._msg(('' if raw else 'NOTE: ') + str(msg), self.NOTE, raw)",
"def add_note(self, note):\n self.note = note\n self.set_box_height_width(self.note)",
"def record_note(self, tag, note):\n if self.record_file:\n rec = [NOTE, tag, note]\n f = open(self.record_file, 'a')\n labeled_dump('note', rec, f, 1)\n f.close()",
"def add_note(submission_id):\n query = {'_id': ObjectId(submission_id)}\n note = request.form.get('note')\n submission = db.submissions.find_one(query)\n db.submissions.update_one(query, {'$push': {'notes' : note }})\n return redirect('/admin/manage-submissions/view/' + submission_id)",
"def do_notes(self, arg):\n fn = os.path.normpath(self.filename)\n with open('test_notes.txt', 'a') as notes:\n notes.write('{}: {}\\n'.format(fn, arg))",
"def add_commit(self, commit, date):\n\n self.commits.append([commit, date])",
"def add_note_to_dataset(self, text_to_add):\n try:\n note_id = __datasets__.current.get_note_id()\n except AttributeError:\n # The dataset may be already deleted?\n return False\n if note_id:\n __notes__.add_auto_text_to_note(note_id, text_to_add)\n else:\n # There was no note yet. Create it and add the text.\n note_id = __notes__.new_note()\n __datasets__.current.set_note_id(note_id)\n __notes__.add_auto_text_to_note(note_id, text_to_add)",
"def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1",
"def add_comment(session, text, note, user):\n\n check_permission(session, PermissionType.COMMENT, user, note)\n\n comment = Comment(body=text, note_id=note.id, owner_id=user.id)\n\n session.add(comment)",
"def add_comment(self, message):\n self.repo.gh.post(self.path(\"comments\"),\n {\"body\": message})",
"def _AmendCommitMessage(self, git_repo):\n git.RunGit(git_repo, ['commit', '--amend', '-m', self.commit_message])\n self.sha1 = ParseSHA1(self._PullData('HEAD', git_repo)[0], error_ok=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the git note of this commit
|
def get_note(self):
cmd = self._repo._repo.git
try:
return cmd.notes('--ref', self.NOTE_REF, 'show', self.sha)
except GitCommandError:
return None
|
[
"def read_note(commit='HEAD'):\n try:\n output, _ = run_command('git notes --ref=%s show' % NOTES_REF,\n trap_stdout=True, trap_stderr=True,\n output_on_error=False)\n except CalledProcessError:\n return {}\n\n data = {}\n for line in output.split('\\n'):\n if line:\n k, v = line.split(': ', 1)\n data[k] = v\n return data",
"def commit(self) -> Optional[str]:\n return pulumi.get(self, \"commit\")",
"def get_commit_message(self):\n fullName = self.get_object_string(self.request_data, \"fullName\")\n name = self.get_object_string(self.request_data, \"name\")\n\n commitmessage = fullName.replace(name + \": \", \"\")\n\n self.logger.debug('get_commit_message: %s' % commitmessage)\n return commitmessage",
"def get_git_changeset():\n repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=repo_dir,\n universal_newlines=True)\n\n timestamp = git_log.communicate()[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError: # pragma: nocover\n return None # pragma: nocover\n return timestamp.strftime('%Y%m%d%H%M%S')",
"def git_last_non_student_commit() -> str:\n ...",
"def commit(self):\n return self._repo.get_commit_for_branch(self)",
"def git_tag() -> str:\n p = subprocess.run(\"git log --format='%h' -n 1\".split(' '), capture_output=True)\n p.check_returncode()\n return p.stdout.decode('utf-8').strip().strip(\"'\")",
"def get_git_changeset():\n repo_dir = os.path.dirname(\n os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))\n )\n )\n git_log = subprocess.run(\n ['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_dir,\n )\n timestamp = git_log.stdout\n try:\n timestamp = datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')",
"def GetLastCommitMessage(self):\n exit_code, output, _ = self.RunCommand('git log -1')\n if exit_code != 0:\n return None\n\n # Expecting 6 lines of output where the 5th line contains\n # the commit message.\n output_lines = output.split('\\n')\n if len(output_lines) != 6:\n return None\n\n return output_lines[4].strip()",
"def get_note():\n\tif 'note' not in color_dict or not color_dict['note']:\n\t\treturn 'This colorscheme file is generated by colorvim command'\n\treturn color_dict['note']",
"def get_raw_commit_message(self, revisions):\n return self._execute(\n [self.git, 'log', '--reverse', '--pretty=format:%s%n%n%b',\n '^%s' % revisions['base'], revisions['tip']],\n ignore_errors=True).strip()",
"def get_git_desc():\n\n gitdir = get_git_repo_path()\n assert os.path.isdir(gitdir)\n\n return git.Repo(gitdir).git.describe('--always', '--dirty', '--abbrev')",
"def get_commit_hash():\n return git.Repo().head.object.hexsha",
"def write_note(data, commit='HEAD'):\n command = 'git notes --ref=%s add ' % NOTES_REF\n for k, v in data.iteritems():\n command = '%s -m \"%s: %s\"' % (command, k, v)\n run_command(command)",
"def last_modified_commit(*paths, **kwargs):\n return check_output(\n [\"git\", \"log\", \"-n\", \"1\", \"--pretty=format:%h\", \"--\", *paths], **kwargs\n ).decode(\"utf-8\")",
"def get_commit_date(commit):\n return commit['commit']['author']['date']",
"def make_ticket_comment(self, repos, changeset):\n revstring = str(changeset.rev)\n if repos.reponame:\n revstring += '/' + repos.reponame\n return \"\"\"\\\nIn [%s]:\n{{{\n#!CommitTicketReference repository=\"%s\" revision=\"%s\"\n%s\n}}}\"\"\" % (revstring, repos.reponame, changeset.rev, changeset.message.strip())",
"def get_commit(self, seq_no):\n\n return self.commits[seq_no]",
"def get_git_commit_sha():\n\n return os.getenv(\"GIT_COMMIT\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add an ephemeral file as a git note.
|
def add_ephemeral_file(self, name, content=None, path=None):
if name in self.filenames:
raise ValueError("File name '{}' has already been used".format(name))
cmd = self._repo._repo.git
with TemporaryDirectory() as tmpdir:
if path is None:
# Write content to a temporary file
assert content is not None
path = os.path.join(tmpdir, name)
with open(path, 'wb') as f:
f.write(content)
# Store file in the git object DB
obj_id = cmd.hash_object('-w', path)
# Add this file to the list of ephemeral files for this commit
cmd.notes('--ref', self.FILE_LIST_REF, 'append', '-m', name, self.sha)
# Add the file as a note
cmd.notes('--ref', self.FILE_REF_BASE + name, 'add', '-f', '-C', obj_id, self.sha)
# Clear cached properties so they get recalculated on next access
del self.ephemeral_file_names
del self.filenames
|
[
"def add_note(self, note):\n cmd = self._repo._repo.git\n cmd.notes('--ref', self.NOTE_REF, 'add', '-f', '-m', note, self.sha)",
"def write_note(data, commit='HEAD'):\n command = 'git notes --ref=%s add ' % NOTES_REF\n for k, v in data.iteritems():\n command = '%s -m \"%s: %s\"' % (command, k, v)\n run_command(command)",
"def add_note(self):\n note_id = __notes__.new_note()\n self.set_note_id(note_id)",
"def open(ctx, note):\n directory = ctx.obj[\"config\"][\"owner\"][\"dir\"]\n note = Note(directory, note)\n click.edit(filename=note.path)",
"def add_log_note(self, note):",
"def test_add_file():\n author = repo.get_author_info()\n fastimport.start_commit('master', author, \"a commit\")\n fastimport.deleteall()\n testfile = os.path.join(repo.path, '.git', 'description')\n fastimport.add_file('./testfile',\n file(testfile),\n os.path.getsize(testfile))",
"def add_note(self, token, task_id, note_content):\n params = {\n 'token': token,\n 'item_id': task_id,\n 'content': note_content\n }\n return self._post('addNote', params)",
"def do_notes(self, arg):\n fn = os.path.normpath(self.filename)\n with open('test_notes.txt', 'a') as notes:\n notes.write('{}: {}\\n'.format(fn, arg))",
"def _add_note_entry(self):\n note = self.faker.sentence()\n instance = models.Note.objects.create(child=self.child, note=note)\n instance.save()\n self._add_tags(instance)",
"def git_add(new_files_PATH):\n repo.git.add(new_files_PATH)",
"def record_note(self, tag, note):\n if self.record_file:\n rec = [NOTE, tag, note]\n f = open(self.record_file, 'a')\n labeled_dump('note', rec, f, 1)\n f.close()",
"def create_note(store, path, notebook):\n ext = utils.get_file_ext(path)\n processor_cls = note_processors.get(ext)\n processor = processor_cls(path)\n note = Note()\n note.title = processor.get_title()\n note.content = processor.get_content()\n attributes = NoteAttributes()\n attributes.sourceURL = utils.path_to_source_url(notebook, path)\n note.attributes = attributes\n note.notebookGuid = notebook.guid\n try:\n return store.createNote(dev_token, note)\n except EDAMUserException as e:\n evernote_api_error(e, note)",
"def git_add(filename):\n subprocess.call(['git', 'add', filename])",
"def note(text):\n date = datetime.datetime.now()\n file_name = 'notes' + '/' + str(date).replace(':', '_') + '_note.txt'\n with open(file_name, 'w') as f:\n f.write(text)\n\n subprocess.Popen(['notepad.exe', file_name])",
"def _add_file(file_path):\n _db_content[\"files\"].append(file_path)",
"def create_file(self, path, file):\n\t\ttry:\n\t\t\twith self.PUSH:\n\t\t\t\tself.repo.create_file(path, f\"Created {path}\", file)\n\t\t\t\tprint(f\"{self.get_emoji('ok')}Created {path}\")\n\t\texcept:\n\t\t\tprint(f\"There was an error to this file: {path}\")",
"def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1",
"def generate_note(stem: str) -> str:\n note = f\"\"\"\n.. note::\n An *xml* file containing the defaults for the `{stem}` calculator can be created via `-p {stem} -o FILENAME` command line options `\n\"\"\"\n return note",
"def test_add_file(self):\n filename = \"quux\"\n file_path = os.path.join(self.repo, filename)\n with salt.utils.files.fopen(file_path, \"w\") as fp_:\n fp_.write(\n salt.utils.stringutils.to_str(\n \"This is a test file named {}.\\n\".format(filename)\n )\n )\n ret = self.run_function(\"git.add\", [self.repo, filename])\n self.assertEqual(ret, \"add '{}'\".format(filename))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the contents of an ephemeral file as a Blob object.
|
def get_ephemeral_file(self, name):
cmd = self._repo._repo.git
try:
blob_hexsha = cmd.notes('--ref', self.FILE_REF_BASE + name, 'list', self.sha)
binsha = binascii.a2b_hex(blob_hexsha)
return Blob(self._repo._repo, binsha, path=name)
except GitCommandError:
return None
|
[
"def _get_blob(self):\n return self.bucket.get_blob(self.flowerpot_path)",
"def get_blob(self):\n return Blob.Blob(self._internal.get_blob())",
"def fetch_blob(server, uuid, instance, reference, as_json=False, *, session=None):\n r = session.get(f'{server}/api/node/{uuid}/{instance}/blobstore/{reference}')\n r.raise_for_status()\n if as_json:\n return r.json()\n return r.content",
"def getBlobStream(self) -> ghidra.app.util.bin.format.pe.cli.streams.CliStreamBlob:\n ...",
"def strict_getblob(gs_path, credentials=None, user_project=None):\n blob = getblob(gs_path, credentials, user_project)\n if not blob.exists():\n raise FileNotFoundError(\"No such blob: \"+gs_path)\n return blob",
"def read_file(file_name : str) -> bytes:\n\n with open(file_name, mode='rb') as f:\n file_content = f.read()\n\n return file_content",
"def get_file(self):\n return files.StorageFile(self.storage, self.get_path())",
"def get_data(self, bucket_name, file_name):\n\n bucket = self.client.get_bucket(bucket_name)\n blob = bucket.get_blob(file_name)\n\n return json.loads(blob.download_as_string())",
"def read(self, file, type='pickle'):\n blob_client = self.blobservice.get_blob_client(container=self.container_name, blob=file)\n dl = blob_client.download_blob()\n obj = BytesIO(dl.content_as_bytes())\n if type == 'parquet':\n val = pickle.load(obj)\n else:\n val = pd.read_parquet(obj)\n return val",
"def file_object(self) -> BufferedReader:\n return self.reader.file_object",
"def get_file_content(filename):\n\n file = open(filename, mode='r')\n file_data = file.read()\n file.close()\n return file_data",
"def azure_blob_storage_data_source(self) -> 'outputs.AzureBlobStorageDataResponse':\n return pulumi.get(self, \"azure_blob_storage_data_source\")",
"def get_blob_as_string(bucket_name, blob_name):\n global storage_client\n global context_key\n\n if storage_client is None:\n storage_client = storage.Client()\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.get_blob(blob_name)\n return blob.download_as_string()",
"def test_get_blob(self):\n pass",
"def get_the_content_of_a_file(file_path):\r\n return open(file_path, encoding=\"utf8\").read()",
"def get_object_content(self, key):\n with self.open(key) as fhandle:\n return fhandle.read()",
"def get_file_content(token):\r\n filters = ((\"token\", token),)\r\n file_record = None\r\n with DB.session.begin():\r\n file_record = get_files_by_filter((filters))\r\n\r\n if not file_record:\r\n raise exceptions.DataValidationException(\r\n \"No record exists with token '%s'\" % token)\r\n file_record = file_record[0]\r\n\r\n try:\r\n with codecs.open(\r\n file_record.file_path, \"r\", encoding=\"utf-8\") as file_handle:\r\n return file_handle.read()\r\n except OSError as oe:\r\n LOG.error(\"Error occurred for updating content\", exc_info=True)\r\n raise oe\r\n except IOError:\r\n # File entry exists, but file not created yet\r\n LOG.warning(\"File '%s' not created yet\")\r\n return \"\"",
"def files(self):\n return chain(self._commit.tree.blobs, self.ephemeral_files)",
"def content_file(self):\n return self._content_file"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the names of any ephemeral files associated with this commit.
|
def list_ephemeral_files(self):
cmd = self._repo._repo.git
try:
names_note = cmd.notes('--ref', self.FILE_LIST_REF, 'show', self.sha)
return {n for n in names_note.split('\n') if n}
except GitCommandError:
return set()
|
[
"def filenames(self):\n return {blob.name for blob in self._commit.tree.blobs} | self.ephemeral_file_names",
"def ephemeral_files(self):\n for name in self.ephemeral_file_names:\n yield self.get_ephemeral_file(name)",
"def files(self):\n return chain(self._commit.tree.blobs, self.ephemeral_files)",
"def orphaned_files(self):\n return self._orphaned_files",
"def get_artifacts(self):\n artifact_files = []\n for frame in self.file_frames:\n artifact_files.append(frame.filename)\n return artifact_files",
"def _get_list_of_committed_files():\n files = []\n # pylint: disable=E1103\n diff_index_cmd = 'git diff-index --cached %s' % _current_commit()\n output = subprocess.check_output(\n diff_index_cmd.split()\n )\n for result in output.split('\\n'):\n if result != '':\n result = result.split()\n if result[4] in ['A', 'M']:\n files.append(result[5])\n\n return files",
"def observed_filenames(self):\n return self._attribute('observed_filename', [])",
"def get_tracked_files(self) -> List[str]:\n return list(self.__tracked_files)",
"def files(self):\n files = File.objects.filter(hash=self)\n return files",
"def files_in_commit(commit: Optional[str] = None) -> Iterable[str]:\n cmd = ['git', 'diff', '--name-status']\n if commit is not None:\n cmd.append(f'{commit}..{commit}~1')\n\n output = subprocess.check_output(cmd)\n\n for line in output.decode('utf-8').split('\\n'):\n if line == '':\n break\n\n path = line.split('\\t')[-1]\n\n if not path.endswith('.py'):\n continue\n\n abs_path = os.path.abspath(path)\n\n yield abs_path",
"def get_files(self):\n files = []\n files.extend(self.child_files)\n if not self.special:\n for child in self.child_packages:\n files.extend(child.get_files())\n return files",
"def get_state_filenames(self) -> List[str]:\n return []",
"def f_files(self):\n return self._f_files",
"def files(self):\n return self._file_manager",
"def _get_file_list(self):\n files = [f for f in os.listdir(FILESTORE_PATH) if os.path.isfile(os.path.join(FILESTORE_PATH, f))]\n return files",
"def keys(self, *, ext : str = None ) -> list:\n if self._path is None:\n return []\n ext = ext if not ext is None else self._ext\n ext_l = len(ext)\n keys = []\n with os.scandir(self._path) as it:\n for entry in it:\n if not entry.is_file():\n continue\n if ext_l > 0:\n if len(entry.name) <= ext_l or entry.name[-ext_l:] != ext:\n continue\n keys.append( entry.name[:-ext_l] )\n else:\n keys.append( entry.name )\n return keys",
"def _get_unsaved_files(self):\r\n unsaved = []\r\n files = self.filesystem.get_files()\r\n for f in files:\r\n editable = self.__neditables.get(files[f])\r\n if editable is not None:\r\n if editable.editor.is_modified:\r\n unsaved.append(f)\r\n return unsaved",
"def retrieve_untracked_files(self):\n return self.c.retrieve_untracked_files()",
"def get_modified_files(commit, file_extension):\n current_modified_files = [mod_file for mod_file in commit.modifications\n if mod_file.filename.endswith(file_extension)]\n return current_modified_files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
An iterable of `git.Blob` objects representing ephemeral files.
|
def ephemeral_files(self):
for name in self.ephemeral_file_names:
yield self.get_ephemeral_file(name)
|
[
"def files(self):\n return chain(self._commit.tree.blobs, self.ephemeral_files)",
"def list_ephemeral_files(self):\n cmd = self._repo._repo.git\n try:\n names_note = cmd.notes('--ref', self.FILE_LIST_REF, 'show', self.sha)\n return {n for n in names_note.split('\\n') if n}\n except GitCommandError:\n return set()",
"def get_blobs(self, container=None):\n generator = self.blob_srv.list_blobs(container)\n blobs = [blob for blob in generator]\n return blobs",
"def filenames(self):\n return {blob.name for blob in self._commit.tree.blobs} | self.ephemeral_file_names",
"def get_ephemeral_file(self, name):\n cmd = self._repo._repo.git\n try:\n blob_hexsha = cmd.notes('--ref', self.FILE_REF_BASE + name, 'list', self.sha)\n binsha = binascii.a2b_hex(blob_hexsha)\n return Blob(self._repo._repo, binsha, path=name)\n except GitCommandError:\n return None",
"def blobs(self, tag, ignore_missing=True):\n for path, tags, blobs in self.walk(tag, ignore_missing=ignore_missing):\n if tags != blobs:\n for replicas in blobs:\n yield replicas",
"def persistent_blobs(self):\n value = super(DeferredBlobMixin, self).blobs\n if self._deferred_blobs:\n value = value.copy()\n for name in self._deferred_blobs:\n value.pop(name, None)\n return value",
"def generate_blobs(blobs):\n if len(blobs) == 0:\n blobs = []\n\n for _ in range(MAX_BLOB_AMOUNT - len(blobs)):\n x = random.randint(0, 1400)\n y = random.randint(0, 800)\n blobs.append(Blob(x, y))\n\n return blobs",
"def blobs(self):\n if not self._migrating_blobs_from_couch or not self._attachments:\n return self.external_blobs\n value = {name: BlobMetaRef._from_attachment(info)\n for name, info in self._attachments.items()}\n value.update(self.external_blobs)\n return value",
"def files(self):\n files = File.objects.filter(hash=self)\n return files",
"def list_blobs_object_path(self):\n return self.list_blobs(self.object_path)",
"def _path_component_blobs(self):\n # This shall be implemented by subclasses, but shouldn't halt execution if called.\n return []",
"def list(self):\n for x in self.container.list_blobs():\n sz = filesize(x.size)\n print(x.name + '\\t' + str(sz[0]) + ' ' + sz[1])",
"def list_blobs(account_name: str, account_key: str, container_name: str) -> List[BlobProperties]:\n\n account_url = make_account_url(account_name)\n client: ContainerClient = ContainerClient(account_url, container_name, credential=account_key)\n blobs = client.list_blobs()\n return [b for b in blobs]",
"def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n\n # for blob in blobs:\n # print(blob.name)\n return blobs",
"def list_blobs(bucket_name, max_blobs):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n\n json_blobs = []\n for b in blobs:\n if b.name.endswith('.json'):\n json_blobs.append(b)\n\n recent_blobs = sorted(json_blobs, key=lambda blob: blob.updated, reverse=True)\n d = collections.OrderedDict()\n num_blobs = 0\n for b in recent_blobs:\n formatted_date = b.updated.strftime('%Y-%m-%d %H:%M:%S')\n d[formatted_date] = b\n num_blobs += 1\n if num_blobs == max_blobs:\n break\n return d",
"def chunks(self):\n for name in self.chunk_names():\n yield self.storage.open(name).read()",
"def orphaned_files(self):\n return self._orphaned_files",
"def test_document_get_blobs(server):\n\n number = 4\n with Doc(server, blobs=number) as doc:\n for idx in range(number):\n xpath = f\"files:files/{idx}/file\"\n blob = doc.fetch_blob(xpath, ssl_verify=SSL_VERIFY)\n assert blob == f\"foo {idx}\".encode(\"utf-8\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Iterable over immediate parents of this commit.
|
def parents(self):
return (Commit(self._repo, parent) for parent in self._commit.parents)
|
[
"def ancestors(self):\n return (\n Commit(self._repo, parent)\n for parent in self._commit.iter_parents()\n )",
"def get_parents(self):\n return []",
"def parents(self, rev):\n self._scanparents(rev)\n return [r for _c, r in sorted(self._parents.get(rev, []))]",
"def parents(self, host):\n return list(self.iter_parents(host))",
"def iter_parents(node):\n parent = node.parent\n\n while parent:\n yield parent\n\n parent = parent.parent",
"def walk_parents(self):\n active = self.parent_datasets[:]\n while active:\n d = active.pop()\n yield d\n active += d.parent_datasets",
"def parents(self):\n # Sort here for determinism\n # return sorted(self._parents.values(), key=lambda edge: str(edge))\n return list(self._parents.values())",
"def get_parents(self, changeset_hash):\n raise NotImplementedError(\"Abstract method\")",
"def get_parents(self, person):\n for f in self.families:\n if person in f.children:\n for p in f.parents:\n yield p",
"def ancestors(self):\n stack = deque([self])\n parent = self.parent\n while parent:\n stack.appendleft(parent)\n parent = parent.parent\n return list(stack)",
"def parentsset(self, rev):\n return smartset.baseset(self.parents(rev))",
"def get_parents(self):\n _p = {}\n for s, children in self.store.items():\n for (_, child) in children:\n assert not child in _p # Each core has only one parent\n _p[child] = s\n return _p",
"def parent_names(self) -> List[str]:\n return [t.name for t in self.parents]",
"def parents(self, nodename):\n parents = set(self.node_dict[nodename].parents.values())\n return parents",
"def iter_parents(content: IResource) -> typing.Iterator[IResource]:\n content = getattr(content, '__parent__', None)\n while content is not None:\n yield content\n content = getattr(content, '__parent__', None)",
"def parents(self, term):\n for parent_term in term.is_a:\n yield self[parent_term]\n for grand_parent in self.parents(self[parent_term]):\n yield grand_parent",
"def _read_parents(self):\n return set()",
"def ancestors(self):\r\n ancestor_list = [self,]\r\n if self.superordinate is not None:\r\n ancestor_list.extend(self.superordinate.ancestors())\r\n return ancestor_list",
"def parents(self, j):\n return [i for i in range(self._num_vertices) if self.has_edge(i, j)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Iterable over all ancestors of this commit.
|
def ancestors(self):
return (
Commit(self._repo, parent)
for parent in self._commit.iter_parents()
)
|
[
"def ancestors(self):\n stack = deque([self])\n parent = self.parent\n while parent:\n stack.appendleft(parent)\n parent = parent.parent\n return list(stack)",
"def ancestors(self):\r\n ancestor_list = [self,]\r\n if self.superordinate is not None:\r\n ancestor_list.extend(self.superordinate.ancestors())\r\n return ancestor_list",
"def get_ancestors(self) -> List['Node']:\n if self.parent is None:\n return []\n ancestors = [self.parent]\n ancestors += self.parent.get_ancestors()\n return ancestors",
"def get_all_ancestors(node):\n return node.iterancestors()",
"def get_all_ancestors(self):\n sql = '''SELECT * FROM idea JOIN (\n SELECT source_id FROM (\n SELECT transitive t_in (1) t_out (2) t_distinct T_NO_CYCLES\n source_id, target_id FROM idea_idea_link WHERE is_tombstone=0) ia\n JOIN idea AS dag_idea ON (ia.source_id = dag_idea.id)\n WHERE dag_idea.discussion_id = :discussion_id\n AND ia.target_id=:idea_id) x on (id=source_id)'''\n ancestors = self.db().query(Idea).from_statement(text(sql).bindparams(\n discussion_id=self.discussion_id, idea_id=self.id))\n\n return ancestors.all()",
"def get_ancestors(self):\n ancestors = list(self.get_parents())\n ancestor_unique_attributes = set([(a.__class__, a.id) for a in ancestors])\n ancestors_with_parents = [\n a for a in ancestors if isinstance(a, DescendantMixin)\n ]\n for ancestor in ancestors_with_parents:\n for parent in ancestor.get_ancestors():\n if (parent.__class__, parent.id) not in ancestor_unique_attributes:\n ancestors.append(parent)\n return ancestors",
"def ancestors(self, **kwargs):\r\n lev = kwargs.get('level', 2)\r\n ancestors = [b.ancestor(level=lev) for b in self.branches(**kwargs)]\r\n return set([a for a in ancestors if a is not None])",
"def parents(self):\n return (Commit(self._repo, parent) for parent in self._commit.parents)",
"def ancestors(self) -> QuerySet['TreeModel']:\n queryset = self.__class__.objects.filter(path__descendant=self.path)\n return queryset.exclude(id=self.id)",
"def list_ancestors(self):\n return self._list(self.client, ancestors_of_group=self.name)",
"def ancestors(self, revs, stoprev=0, inclusive=False):\n\n return ancestor.lazyancestors(self, revs, stoprev=stoprev,\n inclusive=inclusive)",
"def all_ancestors_of_node(self, node_index):\n\n ancestors = set()\n\n ancestor_pool = self.nodes[node_index].get_parents()\n while len(ancestor_pool) > 0:\n p = ancestor_pool.pop()\n ancestors.add(p)\n indices = self.nodes[p].get_parents()\n if len(indices) > 0:\n for j in indices:\n ancestor_pool.add(j)\n \n return ancestors",
"def ancestors_tree(self):\n tree = {}\n for f in self.parents():\n tree[f] = f.ancestors_tree()\n return tree",
"def get_ancestors(self, **kw):\n return type(self).objects.get_ancestors(self, **kw)",
"def ancestors_edges_set(self, cached_results=None):\n if cached_results is None:\n cached_results = dict()\n if self in cached_results.keys():\n return cached_results[self]\n else:\n res = set()\n for f in self.parents():\n res.add((f, self))\n res.update(f.ancestors_edges_set(cached_results=cached_results))\n cached_results[self] = res\n return res",
"def parents(self, rev):\n self._scanparents(rev)\n return [r for _c, r in sorted(self._parents.get(rev, []))]",
"def get_node_ancestors(synset):\n ancestors = set()\n # In the following line, synset.parents already is a set but we create a copy\n # of it instead of using synset.parents directly as later we are 'popping'\n # elements from this set, which would otherwise result to permanently removing\n # parents of synset which is undesirable.\n to_visit = set(synset.parents)\n visited = set()\n while to_visit:\n ancestor = to_visit.pop()\n ancestors.add(ancestor)\n visited.add(ancestor)\n # Same as in the comment above, we create a copy of ancestor.parents\n to_visit = to_visit | set(ancestor.parents) - visited\n return ancestors",
"def get_parents(self, changeset_hash):\n raise NotImplementedError(\"Abstract method\")",
"def g_ancestors(self):\r\n ancest = map(lambda a: \":{0}\".format(a), self.ancestors)\r\n return \", \".join(ancest)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Grab the JARM fingerprint from the local 'possible_jarms' store. This should be run after setup.sh to output the possible configurations.
|
def get_jarm_from_local(tls_version, cipher):
try:
with open(config.paths['possible_jarms']) as _file:
jarms = json.load(_file)
for j in jarms:
if any(c['tls_version'] == tls_version and c['cipher'] == cipher for c in jarms[j]['configs']):
return j
except:
return ''
|
[
"def jar(self):\n\n\t\treturn self.tool_config.get('jar', default = 'minecraft_server.jar')",
"def get_jamf_information():\n r = requests.get('{}/JSSResource/computers/serialnumber/{}'.format(API_URL, get_serial_number()),\n auth=(API_USER, API_PASS))\n xml_response = untangle.parse(r.text.encode('utf-8'))\n\n try:\n jamf_id = xml_response.computer.general.id.cdata\n jamf_site_name = xml_response.computer.general.site.name.cdata\n return (jamf_id, jamf_site_name)\n except AttributeError:\n print('Error getting JAMF id and/or JAMF site name. No such attribute in response')\n return -1",
"def get_fingerprints():\n\n ssh_key_locations = [\"ssh_host_dsa_key\",\n \"ssh_host_key\",\n \"ssh_host_rsa_key\",\n \"ssh_host_ecdsa_key\"]\n\n retrieved_keys = []\n\n with settings(warn_only=True), hide(\"stdout\", \"warnings\"):\n for key in ssh_key_locations:\n full_key = \"/etc/ssh/\" + key + \".pub\"\n found_key = run(\"ssh-keygen -l -f \" + full_key)\n if found_key is not None:\n if re.match(\"[0-9]\", found_key[0]) is not None:\n retrieved_keys.append(found_key)\n\n print \"\\nFingerprints for {}\".format(env.host)\n for key in retrieved_keys:\n print key\n print \"\\n\"",
"def get_fingerprint():\n print(\"Waiting for image...\")\n while finger.get_image() != adafruit_fingerprint.OK:\n pass\n print(\"Templating...\")\n if finger.image_2_tz(1) != adafruit_fingerprint.OK:\n return False\n print(\"Searching...\")\n if finger.finger_fast_search() != adafruit_fingerprint.OK:\n return False\n return True",
"def import_rasperrypi():\n import raspberrypi as hardware\n available_pins = [7, 8]\n return hardware, available_pins",
"def main(): \n print('--------------------------------------------')\n print('---------Fingerprinting with Python---------')\n args = parsing()\n print('Parameters:')\n print(f' Modality 1: {args.modality1} / Modality 2: {args.modality2}')\n print(f' Path to matrices of modality 1: {args.path_m1}')\n print(f' Path to matrices of modality 2: {args.path_m2}')\n print(f' Path to output: {args.output}')\n print(f' Type of fingerprinting: {args.type}')\n print(' ')\n print(f' Parcellation used: {args.parcellation}')\n print(f' Network used: {args.network}')\n print(f' Correlation used: {args.corr}')\n print(f' ')\n print(f' QC List modality 1: {args.qc_list_m1}')\n print(f' QC List modality 2: {args.qc_list_m2}')\n print(f' Subset: {args.reduction}')\n print(f' Name of subset: {args.extra}')\n print('--------------------------------------------')\n print(' ')\n\n verify_input(args)\n print(f'Initializing an object of class fingerprint...')\n #Create an object of the Fingerprint class\n fp = fingerprint(args.path_m1, args.path_m2, args.qc_list_m1, args.qc_list_m2, args.output,\n args.type, args.modality1, args.modality2, args.parcellation, args.network, args.corr,\n args.reduction, args.extra)\n\n #print(f'Setting nodes of interest based on the {parcellation} parcellation, with {network} network')\n print(f'Selecting ROIs based on network and parcellation...')\n fp.rois()\n\n print(f'Scrubbing directories and fetching filenames...')\n fp.filename_fetch()\n\n print(f'Subsetting the subjects based on QC_list and reduction (if applicable)...')\n fp.subset()\n\n print(f'Finding out which subjects are in both modalities...')\n fp.final_subject_list()\n\n print(f'Targeting filenames of subjects in both modalities...')\n fp.final_file_selection()\n\n print(f'Calculating the similarity matrices...')\n fp.sim_matrix_calculation()\n\n print(f'Export the similarity matrix to a file...')\n fp.export_matrix()\n\n print(f'Export the subject list to a file...')\n fp.export_subject_list()\n\n print(f'Calculating the fingerprinting accuracy...')\n fp.fingerprint_accuracy()\n\n print(f'Calculating the FPC, ASC and BSD...')\n fp.fpc_asc_calculation()\n\n print(f'Export the FP metrics and the accuracy of the FP to a file...')\n fp.export_fpc_df()\n\n print('Extraction complete!')\n print('--------------------------------------------')\n print(' ')\n return None",
"def get_hash_library_name():\n ft2_version = f\"{mpl.ft2font.__freetype_version__.replace('.', '')}\"\n mpl_version = \"dev\" if \"+\" in mpl.__version__ else mpl.__version__.replace(\".\", \"\")\n sunkit_image_version = \"dev\" if \"dev\" in sunkit_image.__version__ else sunpy.__version__.replace(\".\", \"\")\n sunpy_version = \"dev\" if \"dev\" in sunpy.__version__ else sunpy.__version__.replace(\".\", \"\")\n return f\"figure_hashes_mpl_{mpl_version}_ft_{ft2_version}_sunkit_image_{sunkit_image_version}_sunpy_{sunpy_version}.json\"",
"def fingerprint(self):\n if self._fingerprint is None:\n if self.molecule:\n self._fingerprint = self.molecule[0].fingerprint\n return self._fingerprint",
"def fingerprint():\n directory_walker(fingerprint_audiofile, (os.path.join(STORAGE_BASE_PATH,\n FILEHANDLING_CONFIG['checksummed_path']),\n os.path.join(STORAGE_BASE_PATH,\n FILEHANDLING_CONFIG['fingerprinted_path'])))",
"def match(): # code\n\n # for testing, match first fingerprint code in creation.utilisation.imp\n #code = \"\"\n\n utilizations = Model.get('creation.utilisation.imp')\n result = utilizations.find(['title', \"=\", \"999,999\"])\n if not result:\n sys.exit()\n #code = result.fingerprint\n\n print result[0].fingerprint",
"def phred_autodetect(input_file, USER_PHRED):\n\n if input_file.endswith('.gz'): # Open file\n infile = gzip.open(input_file, 'rt')\n else: \n infile = open(input_file, 'r') \n\n # Phred sets\n phred64_set = set(\"@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefgh\")\n phred33_set = set(\"!\\\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJ\")\n\n quality_string = '' # Initialize variables\n line_count = 0\n is_phred33 = False \n is_phred64 = False\n phred_determined = False\n\n line = infile.readline()[:-1] # Read line by line, until phred type is found\n while phred_determined == False:\n line_count += 1\n\n if line_count == 4: # At this point, we are looking at a quality string\n quality_string = line\n is_phred33 = set(quality_string).issubset(phred33_set)\n is_phred64 = set(quality_string).issubset(phred64_set)\n line_count = 0\n\n if is_phred33 and not is_phred64:\n phred_determined = True\n return \"33\"\n\n elif not is_phred33 and is_phred64:\n phred_determined = True\n return \"64\"\n \n line = infile.readline().strip()\n\n infile.close()\n\n # In case phred can't be determined, use the users input. \n if not phred_determined: \n # If user did not specify phred type \n if USER_PHRED == '':\n print('ERROR: We cannot autodetect the phred encoding type of your file(s). Please specify it in the input.')\n sys.exit(1)\n phred_determined = True\n return USER_PHRED",
"def fingerprint(self):\n return self.read_metadata_by_name(self.FINGERPRINT_KEY)",
"def __lookup_registry(self):\n self.__get_current_version_info()\n self.__get_installed_apps()\n self.__get_iis_info()\n #TODO: May need another API to read from reg\n #self.__check_active_directory()\n self.__get_ie_details()\n #TODO: reads a file, not registry so need to fit some where else\n #self.__get_prodspec()\n self.os_details['installed_app'] = ', '.join(self.installed_app)",
"def get_known_jids():\n global _known_jids\n _known_jids_lock.acquire()\n if _known_jids == None:\n _known_jids = ThreadSafeList()\n _known_jids_lock.release()\n return _known_jids",
"def _get_fingerprint(arg, controller):\n\n if not arg:\n try:\n return controller.get_info('fingerprint')\n except:\n raise ValueError(\"We aren't a relay, no information to provide\")\n elif stem.util.tor_tools.is_valid_fingerprint(arg):\n return arg\n elif stem.util.tor_tools.is_valid_nickname(arg):\n try:\n return controller.get_network_status(arg).fingerprint\n except:\n raise ValueError(\"Unable to find a relay with the nickname of '%s'\" % arg)\n elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg):\n if ':' in arg:\n address, port = arg.rsplit(':', 1)\n\n if not stem.util.connection.is_valid_ipv4_address(address):\n raise ValueError(\"'%s' isn't a valid IPv4 address\" % address)\n elif port and not stem.util.connection.is_valid_port(port):\n raise ValueError(\"'%s' isn't a valid port\" % port)\n\n port = int(port)\n else:\n address, port = arg, None\n\n matches = {}\n\n for desc in controller.get_network_statuses():\n if desc.address == address:\n if not port or desc.or_port == port:\n matches[desc.or_port] = desc.fingerprint\n\n if len(matches) == 0:\n raise ValueError('No relays found at %s' % arg)\n elif len(matches) == 1:\n return list(matches.values())[0]\n else:\n response = \"There's multiple relays at %s, include a port to specify which.\\n\\n\" % arg\n\n for i, or_port in enumerate(matches):\n response += ' %i. %s:%s, fingerprint: %s\\n' % (i + 1, address, or_port, matches[or_port])\n\n raise ValueError(response)\n else:\n raise ValueError(\"'%s' isn't a fingerprint, nickname, or IP address\" % arg)",
"def read_fingerprint(node):\n return _get_attr(node, ATTR_FINGERPRINT)",
"def getProgramRegistryPath():\r\n return config.getConfig()[\"installed-programs.json\"]",
"def read_fingerprints(self, filename):\n self.g = []\n self.dg = []\n self.m_names = []\n self.m_groups = []\n self.sys_elements = []\n self.m_attrs = []\n self.m_energies = []\n self.m_element_counts = []\n self.standards = []\n\n libver = self.settings['libver']\n descriptor = self.settings['descriptor']\n index = self.settings['index']\n with h5py.File(filename, 'r', libver=libver) as h5f:\n # 1) read list of names from system/sys_entries dataset\n self.m_names = list(h5f.require_group(descriptor).keys())\n self.m_names = np.asarray(self.m_names)[slice_from_str(index)]\n self.sys_elements = [symbol.decode('utf-8')\n for symbol\n in h5f['system'].attrs['sys_elements']]\n # 2) Loop through fingerprints, loading data to object\n for j, m_name in enumerate(self.m_names):\n print('read', j, end='\\r')\n path = descriptor + '/' + m_name\n fp = Fingerprint()\n fp.from_file(h5f, path, self.sys_elements)\n self.g.append([fp.dsets_dict[key]\n for key in sorted(fp.dsets_dict.keys())\n if key.find('G_') == 0])\n self.dg.append([fp.dsets_dict[key]\n for key in sorted(fp.dsets_dict.keys())\n if key.find('dG_') == 0])\n energy = float(fp.energy_val) * 1000\n # energy per atom\n self.m_energies.append(energy)\n self.m_element_counts.append(fp.element_counts)\n\n self.m_attrs.append({'energy': energy,\n 'composition': fp.element_counts,\n 'size': fp.natoms})\n\n if not set(fp.elements_set).issubset(set(self.sys_elements)):\n continue # skip if not part of system\n self.g = [list(g) for g in zip(*self.g)]\n self.dg = [list(dg) for dg in zip(*self.dg)]\n print('Read {} fingerprints from {}'.format(len(self.m_energies),\n filename))",
"def _get_symlink_candidates(self):\n\n jarsToLocations = collections.defaultdict(set)\n\n # TODO: Make this more efficient?\n for (dirpath, _, filenames) in os.walk(os.getcwd()):\n for fname in filenames:\n if not fname.endswith('.jar'):\n continue\n jarsToLocations[fname].add(dirpath)\n\n logging.info(\"Found %d unique jars\" % len(jarsToLocations.keys()))\n logging.info(\"JARS that we can symlink:\")\n logging.info(\"^^^^^^^^^^^^^^^^^^^^^^^^^\")\n for jarname in jarsToLocations.keys():\n if (len(jarsToLocations[jarname]) > 1):\n logging.info(jarname)\n for jarpath in jarsToLocations[jarname]:\n logging.info(\"\\t\" + jarpath)\n return jarsToLocations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all streamers from db
|
def get_all():
# Get all streamers from db
streamers = Streamer.query.all()
# Serialize the data for the response
streamer_schema = StreamerSchema(many=True)
data = streamer_schema.dump(streamers)
return data
|
[
"def get_streams(self):\n return self.result_set",
"def entries(self):\n return self._streams",
"def data_streams(self):\n return self._GetDataStreams()",
"def make_list():\n list_of_streamers = []\n for st in loop:\n st = Streamer(name=st['name'], real_name=['real_name'], uid=st['id'], birthday=st['birthday'], age=st['age'], job=st['job'],\n favor=st['favor'], relationship=st['relationship'],\n location=st['location'], link=st['link'], tags=st['link'])\n list_of_streamers.append(st)\n return list_of_streamers",
"def discover_streams(self) -> List[Stream]:\n return [\n BusinessesStream(tap=self),\n GroupsStream(tap=self),\n ReviewsStream(tap=self),\n ]",
"async def db_get_all_flows(self):\n query = \"SELECT rowid as ID, * FROM `{}` WHERE `guild` in ({})\".format(\n self.table, \",\".join([\"'{}'\".format(x.id) for x in self.bot.guilds])\n )\n liste = self.bot.db_query(query, ())\n for e in range(len(liste)):\n liste[e] = await self.transform_feed(liste[e])\n return liste",
"def fetch_streams(plugin: Plugin) -> Dict[str, Stream]:\n\n return plugin.streams(stream_types=args.stream_types,\n sorting_excludes=args.stream_sorting_excludes)",
"def all_speakers():\n return [_Speaker(id=s['id']) for s in _pulse.sink_list]",
"def get_all_zulip_streams(self):\n response = self.client.get_streams()\n if response['result'] == 'success':\n return response['streams']\n else:\n raise RuntimeError('check yo auth')",
"def get_all_zulip_streams(self):\n\n response = requests.get('https://api.zulip.com/v1/streams', auth=(self.username, self.api_key))\n if response.status_code == 200:\n return response.json()['streams']\n elif response.status_code == 401:\n raise RuntimeError('check yo auth')\n else:\n raise RuntimeError(':( we failed to GET streams.\\n(%s)' % response)",
"def getLivestreams(self):\n self.logger.debug('getLivestreams')\n #\n cached_data = self._cache.load_cache('livestreams', '')\n if cached_data is not None:\n rs = cached_data;\n else:\n esModel = ExtendedSearchModel.ExtendedSearchModel('')\n esModel.reset()\n esModel.setShow('LIVESTREAM')\n esModel.setExactMatchForShow(True)\n rs = self.extendedSearchQuery(esModel)\n self._cache.save_cache('livestreams', '', rs)\n #\n return rs",
"def GetEventDataStreams(self):\n return self._store.GetEventDataStreams()",
"def list_streams(self)->List[Metadata]:\n rows = self.session.query(Stream.stream_metadata).filter(Stream.study_name == self.study_name).all()\n results = []\n if rows:\n for row in rows:\n results.append(Metadata().from_json_file(row.stream_metadata))\n return results\n else:\n return results",
"def get_all(self):\n\t\treturn self.all_connections",
"def iterSources(self):\n for row in self.iterDictQuery(\"%s ORDER BY name\" % self.sourceQuery):\n yield ThermSource(self, **row)",
"def get_feeds(self):\n return self.feeds",
"def get_players():\n return db_players.Player.find()",
"def get_one(username):\n\n # Get matching streamers (if multiple platforms there could be more than one result)\n streamers = Streamer.query.filter(Streamer.username == username).all()\n\n # If no streamer found\n if len(streamers) == 0:\n # Try to fetch streamer from platform API\n verif_found = False\n for api in apis_to_fetch:\n streamer = api.fetch_streamer(username)\n # If found with streaming API, create it\n if streamer is not None:\n create_db(streamer)\n verif_found = True\n # If no streamer found on any platform\n if not verif_found:\n abort(\n 404,\n \"Streamer not found for username : {}\".format(username),\n )\n\n # Serialize list of matching streamers\n streamers = Streamer.query.filter(Streamer.username == username)\n streamer_schema = StreamerSchema(many=True)\n data = streamer_schema.dump(streamers)\n return data",
"def get(): \n return session.query(Fridge_store).all()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all streamers matching the username
|
def get_one(username):
# Get matching streamers (if multiple platforms there could be more than one result)
streamers = Streamer.query.filter(Streamer.username == username).all()
# If no streamer found
if len(streamers) == 0:
# Try to fetch streamer from platform API
verif_found = False
for api in apis_to_fetch:
streamer = api.fetch_streamer(username)
# If found with streaming API, create it
if streamer is not None:
create_db(streamer)
verif_found = True
# If no streamer found on any platform
if not verif_found:
abort(
404,
"Streamer not found for username : {}".format(username),
)
# Serialize list of matching streamers
streamers = Streamer.query.filter(Streamer.username == username)
streamer_schema = StreamerSchema(many=True)
data = streamer_schema.dump(streamers)
return data
|
[
"def search_stream(self, stream_name):\n rows = self.session.query(Stream.name).filter(Stream.name.ilike('%'+stream_name+'%')).all()\n if rows:\n return rows\n else:\n return []",
"def get_viewers(streamer_obj) -> list:\r\n session = requests.Session()\r\n retry = Retry(connect=500, backoff_factor=0.5)\r\n adapter = HTTPAdapter(max_retries=retry)\r\n session.mount(\"http://\", adapter)\r\n session.mount(\"https://\", adapter)\r\n\r\n try:\r\n channel_json = session.get(url=(f\"https://tmi.twitch.tv/group/user/\"\r\n f\"{streamer_obj}/chatters\")).json()\r\n broadcaster = channel_json[\"chatters\"][\"broadcaster\"]\r\n viewers = channel_json[\"chatters\"][\"viewers\"]\r\n moderators = (channel_json['chatters']['moderators'])\r\n staff = (channel_json['chatters']['staff'])\r\n vips = (channel_json['chatters']['vips'])\r\n global_mods = (channel_json['chatters']['global_mods'])\r\n admins = (channel_json['chatters']['admins'])\r\n viewers_list = viewers + staff + vips + global_mods + admins\r\n viewers_and_mods = [viewers_list, moderators, broadcaster]\r\n return viewers_and_mods\r\n except TypeError as e:\r\n print(47, \"viewer_data\", e)\r\n return []",
"def list_accessible_streams_user(\n self, username, databasename=\"_system\", full=False\n ):\n request = Request(\n method=\"get\",\n endpoint=\"/user/{}/database/{}/stream?full={}\".format(\n username, databasename, full\n ),\n )\n\n def response_handler(resp):\n if not resp.is_success:\n raise ListStreams(resp, request)\n else:\n return resp.body[\"result\"]\n\n return self._execute(request, response_handler, custom_prefix=\"/_api\")",
"def stream_for_actor(self, actor):\n return self.filter(\n actor = User.objects.filter(id=actor.pk),\n ).order_by('-created_at')",
"def getVideoFeedByUsername(self, username):\n uri = 'http://gdata.youtube.com/feeds/api/users/%s/uploads' % username\n self.getVideoFeed(uri)",
"def get_users(twitter, screen_names):\r\n uResponse = robust_request(twitter,\"users/lookup\",{'screen_name':screen_names})\r\n u = [user for user in uResponse]\r\n return u\r\n pass",
"def streams(self, mask, target, args):\n if self.spam_protect('streams', mask, target, args):\n return\n streams = yield from self.hitbox_streams()\n streams.extend((yield from self.twitch_streams()))\n blacklist = self.bot.db['blacklist'].get('users', {})\n for stream in streams:\n try:\n if stream[\"media_display_name\"] in blacklist:\n streams.remove(stream)\n except:\n if stream[\"channel\"][\"display_name\"] in blacklist:\n streams.remove(stream)\n\n if len(streams) > 0:\n self.bot.privmsg(target, \"%i streams online:\" % len(streams))\n for stream in streams:\n t = stream[\"channel\"].get(\"updated_at\", \"T0\")\n date = t.split(\"T\")\n hour = date[1].replace(\"Z\", \"\")\n\n try: \n self.bot.action(target,\n \"%s - %s - %s Since %s (%s viewers) \"\n % (stream[\"media_display_name\"],\n stream[\"media_status\"],\n stream[\"channel\"][\"channel_link\"],\n stream[\"media_live_since\"],\n stream[\"media_views\"]))\n\n except KeyError:\n self.bot.action(target,\n \"%s - %s - %s since %s (%i viewers) \"\n % (stream[\"channel\"][\"display_name\"],\n stream[\"channel\"][\"status\"],\n stream[\"channel\"][\"url\"],\n hour,\n stream[\"viewers\"]))\n else:\n self.bot.privmsg(target, \"Nobody is streaming :'(\")",
"def get_all_zulip_streams(self):\n\n response = requests.get('https://api.zulip.com/v1/streams', auth=(self.username, self.api_key))\n if response.status_code == 200:\n return response.json()['streams']\n elif response.status_code == 401:\n raise RuntimeError('check yo auth')\n else:\n raise RuntimeError(':( we failed to GET streams.\\n(%s)' % response)",
"def fetch_feed_by_username(self, username):\n # Don't use trailing slash\n youtube_url = 'http://gdata.youtube.com/feeds/api'\n uri = os.sep.join([youtube_url, \"users\", username, \"uploads\"])\n return Api.yt_service.GetYouTubeVideoFeed(uri)",
"def get_all_zulip_streams(self):\n response = self.client.get_streams()\n if response['result'] == 'success':\n return response['streams']\n else:\n raise RuntimeError('check yo auth')",
"def speakers(self):\n return self._request('GET', '/speakers')",
"def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())",
"def stream_user_feeds(twitter, stream, target, users):\n\n # Make sure we have Twitter User IDs\n if len(users) == 0:\n print(\"%s No Twitter User IDs found in %s\" % (ERROR, target))\n sys.exit(1)\n\n # Build Twitter User ID list by accessing the Twitter API.\n print(\"%s Building Twitter User ID list from %s\" % (WARNING, users))\n user_ids = []\n while True:\n for user in users:\n try:\n # Get user ID from screen_name using Twitter API.\n user = twitter.get_user(screen_name=user)\n screen_name = user.screen_name\n id = int(user.id)\n\n if id not in users:\n print(\"%s Obtained [@%s:%s]\" % (OK, screen_name, id))\n user_ids.append(str(id))\n\n time.sleep(randint(0, 2))\n except TweepError as te:\n # Sleep a bit in case Twitter suspends us.\n print(\"%s Tweepy Exception: %s\" % (ERROR, te))\n print(\"%s Sleeping for a random amount of time and retrying.\" % WARNING)\n time.sleep(randint(1, 10))\n continue\n except KeyboardInterrupt:\n print(\"\\n%s Ctrl-c keyboard interrupt, exiting.\" % WARNING)\n stream.disconnect()\n sys.exit(0)\n break\n\n # Search for tweets containing a list of keywords.\n print(\"%s Following %s Twitter FEEDs\" % (WARNING, users))\n stream.filter(follow=user_ids, languages=['en'])",
"def getPlaylists(self, user=None):\n pass",
"def make_list():\n list_of_streamers = []\n for st in loop:\n st = Streamer(name=st['name'], real_name=['real_name'], uid=st['id'], birthday=st['birthday'], age=st['age'], job=st['job'],\n favor=st['favor'], relationship=st['relationship'],\n location=st['location'], link=st['link'], tags=st['link'])\n list_of_streamers.append(st)\n return list_of_streamers",
"def discover_streams(self) -> List[Stream]:\n return [\n BusinessesStream(tap=self),\n GroupsStream(tap=self),\n ReviewsStream(tap=self),\n ]",
"def getUserStringsStream(self) -> ghidra.app.util.bin.format.pe.cli.streams.CliStreamUserStrings:\n ...",
"def filter(self, names):\r\n criteria = lambda score: (score.get_player() in names)\r\n return Song(\r\n self._leaderboardId,\r\n list(filter(criteria, self._scores))\r\n )",
"def all_speakers():\n return [_Speaker(id=s['id']) for s in _pulse.sink_list]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create streamer in database
|
def create_db(streamer):
schema = StreamerSchema()
new_streamer = schema.load(streamer, session=db.session)
# Add the streamer to the database
db.session.add(new_streamer)
db.session.commit()
|
[
"def createDataStream(self):\n\n name = \"SL_MIXED_MUL_PY_12327\"\n self.datastream.set_name(name)\n\n timezone = \"GMT\"\n self.time.set_zone(timezone)\n\n timeIdentifier = \"time\"\n self.time.set_identifier(timeIdentifier)\n\n self.time.set_format(\"millis\")\n\n precisionFormat = \"millis\"\n self.datastream.set_time_precision(precisionFormat)\n\n self.field.set_time(self.time)\n self.field.set_signal(self.signal)\n self.datasource.set_type(\"STANDALONE\")\n self.datastream.set_datasource(self.datasource)\n self.datastream.set_field(self.field)\n\n entityName = \"entity\"\n self.field.set_entityIdentifier(entityName)\n\n ############## For narrow datastream format ################\n # TODO: Uncomment these lines out for Narrow Datastream Format.\n # signalIdentifier = \"signal\"\n # valueIdentifier = \"value\"\n # self.signal.set_signalIdentifier(signalIdentifier)\n # self.signal.set_valueIdentifier(valueIdentifier)\n\n ################ For batch window Type ###################\n # TODO: Uncomment this line out for Batch window type.\n # batchIdentifier = \"batch_id\"\n # self.field.set_batchIdentifier(batchIdentifier)\n\n createdDataStream = self.falkonry.create_datastream(self.datastream)\n datastreamId = createdDataStream.get_id()\n return datastreamId",
"def __call__(self):\n self.create_database()",
"def set_up_streamer(consumer_key, consumer_secret, \n access_token, access_token_secret, focus, database):\n # Set up an authentication twython user.\n twitter = twython.Twython(consumer_key, consumer_secret,\n access_token, access_token_secret) \n # Get all the twitter account need to analyze\n #follows = get_twitters()\n # Dictionary for account name-account id\n account_id = {}\n ref_articles = []\n \n # Create a Streamer to catch updated tweets from target twitter accounts. \n t_stream = TweeStreamer(consumer_key, consumer_secret,\n access_token, access_token_secret, database = database)\n #t_stream.statuses.filter(follow=follow_ids)\n t_stream.statuses.filter(track=focus)\n # Not sure whether need the article list, since it doesn't include the \n # new articles added by streamer.\n return ref_articles",
"def _create_birdy_stream(self):\n try:\n if self.traptor_type == 'follow':\n # Try to set up a twitter stream using twitter id list\n self._create_twitter_follow_stream()\n elif self.traptor_type == 'track':\n # Try to set up a twitter stream using twitter term list\n self._create_twitter_track_stream()\n elif self.traptor_type == 'locations':\n # Try to set up a twitter stream using twitter term list\n self._create_twitter_locations_stream()\n else:\n theLogMsg = 'Caught error creating birdy stream for Traptor ' \\\n 'type that does not exist'\n self.logger.critical(theLogMsg, extra=logExtra({\n 'error_type': 'NotImplementedError',\n 'error_msg': 'Traptor type does not exist.',\n 'ex': traceback.format_exc(1)\n }))\n dd_monitoring.increment('traptor_error_occurred',\n tags=['error_type:not_implemented_error'])\n except Exception as e:\n theLogMsg = \"Caught Twitter Api Error creating {} stream\".format(self.traptor_type)\n self.logger.error(theLogMsg, extra=logExtra(e))\n dd_monitoring.increment('twitter_error_occurred',\n tags=['error_type:twitter_api_error'])\n raise",
"def test_create_table_no_stream():\n class Model(BaseModel):\n class Meta:\n stream = None\n id = Column(String, hash_key=True)\n table = create_table_request(\"Model\", Model)\n assert \"StreamSpecification\" not in table",
"def produce_record(self, stream, key, data):\n r = Record()\n r.key = key\n r.data = data\n r.userStream = stream\n transaction.records.append(r)",
"def create(\n stream_name: str,\n destination_type: DestinationType,\n raw_schema: str,\n default_schema: str,\n schema: str,\n source_sync_mode: SyncMode,\n destination_sync_mode: DestinationSyncMode,\n cursor_field: List[str],\n primary_key: List[List[str]],\n json_column_name: str,\n properties: Dict,\n tables_registry: TableNameRegistry,\n from_table: Union[str, dbt_macro.Macro],\n ) -> \"StreamProcessor\":\n return StreamProcessor(\n stream_name,\n destination_type,\n raw_schema,\n default_schema,\n schema,\n source_sync_mode,\n destination_sync_mode,\n cursor_field,\n primary_key,\n json_column_name,\n properties,\n tables_registry,\n from_table,\n )",
"def make_list():\n list_of_streamers = []\n for st in loop:\n st = Streamer(name=st['name'], real_name=['real_name'], uid=st['id'], birthday=st['birthday'], age=st['age'], job=st['job'],\n favor=st['favor'], relationship=st['relationship'],\n location=st['location'], link=st['link'], tags=st['link'])\n list_of_streamers.append(st)\n return list_of_streamers",
"def _create_twitter_track_stream(self):\n self.logger.info('Creating birdy track stream')\n self.birdy_stream = self.birdy_conn.stream.statuses.filter.post(\n track=self.twitter_rules,\n stall_warnings='true'\n )",
"def _setup(self):\n directory = os.path.join('/tmp', self.stream_id)\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def create_streams(stream_names=None):\n # If specific streams are not given, create all of the possible streams\n if stream_names is None:\n stream_names = ['MATCH_NUM_STREAM', 'CYCLE_NUM_STREAM',\n 'TEMP_SUPER_STREAM']\n streams = {}\n # Creates each of the streams specified and stores them in the\n # streams dict.\n for name in stream_names:\n if name == 'MATCH_NUM_STREAM':\n streams[name] = DB.child(\n 'scoutManagement/currentMatchNumber').stream(\n match_num_stream_handler)\n elif name == 'CYCLE_NUM_STREAM':\n streams[name] = DB.child('scoutManagement/cycleNumber'\n ).stream(cycle_num_stream_handler)\n elif name == 'TEMP_SUPER_STREAM':\n # Used to remove any outdated data\n delete_cache_data_folder('temp_super')\n streams[name] = DB.child('tempSuper').stream(\n temp_super_stream_handler)\n return streams",
"def test_when_stream_exists(self):\n Stream.objects.all().delete()\n new_stream = Stream(pipe=self.pipe, auth=self.auth)\n new_stream.save()\n saved_stream = self.controller.stream\n actual = saved_stream.pk\n expected = new_stream.pk\n self.assertEqual(actual, expected)",
"def save_deepstream_server(deepstream_server):\n deepstream_server.save()",
"def main():\t\n\n\tconn=connect_db()\n\tconsumer = KafkaConsumer()\n\tconsumer.subscribe('AttackTypeCountStream')\n\n\tfor msg in consumer:\n\t\tprint msg.value\t\t\n\t\tkey,value = msg.value.split(\"##\")\n\t\twrite_table(key,value)",
"def addStream(self, stream, interpolator=\"closest\", t1=None, t2=None, dt=None, limit=None, i1=None, i2=None, transform=None,colname=None):\n\n streamquery = query_maker(t1, t2, limit, i1, i2, transform)\n param_stream(self.cdb, streamquery, stream)\n\n streamquery[\"interpolator\"] = interpolator\n\n if colname is None:\n # What do we call this column?\n if isinstance(stream, six.string_types):\n colname = stream\n elif isinstance(stream, Stream):\n colname = stream.path\n else:\n raise Exception(\n \"Could not find a name for the column! use the 'colname' parameter.\")\n\n if colname in self.query[\"dataset\"] or colname is \"x\":\n raise Exception(\n \"The column name either exists, or is labeled 'x'. Use the colname parameter to change the column name.\")\n\n self.query[\"dataset\"][colname] = streamquery",
"def _create_twitter_locations_stream(self):\n self.logger.info('Creating birdy locations stream')\n self.birdy_stream = self.birdy_conn.stream.statuses.filter.post(\n locations=self.twitter_rules,\n stall_warnings='true'\n )",
"def __init__(self, sink):\n\n self.sink = sink",
"async def _init(self):\n\n self.postgres = await self.try_connect_postgres(\n host=self.config['postgres']['host'],\n database=self.config['postgres']['database'],\n user=self.config['postgres']['user'],\n password=self.config['postgres']['password'])\n\n try:\n async with self.postgres.acquire() as conn:\n async with conn.cursor() as curs:\n # create tables if they don't already exist\n await curs.execute('''\n CREATE TABLE body (\n id SERIAL PRIMARY KEY,\n sha256 text,\n content text\n );\n\n CREATE TABLE mailitem (\n id SERIAL PRIMARY KEY,\n datesent timestamp,\n subject text,\n fromaddress text,\n bodyid integer REFERENCES body (id)\n );\n\n CREATE TABLE recipient (\n id SERIAL PRIMARY KEY,\n emailaddress text\n );\n\n CREATE TABLE mailrecipient (\n id SERIAL PRIMARY KEY,\n recipientid integer REFERENCES recipient (id),\n mailid integer REFERENCES mailitem (id)\n );\n\n CREATE TABLE attachment (\n id SERIAL PRIMARY KEY,\n mailid integer REFERENCES mailitem (id),\n sha256 text,\n filename text\n );\n ''')\n logger.debug(\"Created fresh database\")\n except:\n pass",
"def stream():\n server_id = request.args.get('server_id')\n stream_url = request.args.get('stream_url')\n return render_template('/stream.html', title='Stream {}'.format(server_id), id_=server_id, stream_url=stream_url)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete streamers with matching username
|
def delete(username):
    # Find matching streamers
    existing_streamers = Streamer.query.filter(Streamer.username == username).all()
    # .all() returns an empty list when nothing matches (never None),
    # so test truthiness rather than comparing against None
    if existing_streamers:
        # Delete streamer for all platforms, then commit once
        for streamer in existing_streamers:
            db.session.delete(streamer)
        db.session.commit()
        return make_response(
            "{} successfully deleted".format(username), 200
        )
    else:
        abort(
            404,
            "Streamer with username {} not found".format(username),
        )
|
[
"def remove(self):\n testing = Streamer(name=self)\n for test in loop:\n if test['name'] == testing.name:\n removed_streamer = {\n \"name\": testing.name,\n \"real_name\": testing.real_name,\n \"id\": testing.uid,\n \"birthday\": testing.birthday,\n \"age\": testing.age,\n \"job\": testing.job,\n \"favor\": testing.favor,\n \"relationship\": testing.relationship,\n \"location\": testing.location,\n \"link\": testing.link,\n \"tags\": testing.tags}\n loop.remove(removed_streamer)\n break\n else:\n print(\"Streamer not found\")",
"def remove_student(conf, args):\n previous_len = len(conf.roster)\n\n idxs = []\n for idx, student in enumerate(conf.roster):\n if student['username'] == args.username:\n idxs.append(idx)\n\n offset = 0\n for idx in idxs:\n del conf.roster[idx - offset]\n offset += 1\n\n logger.info(\"Removed %d entries from the roster\", previous_len - len(conf.roster))",
"def removefsuser(self, username):",
"def delete_new_senders(self, user):\n try:\n self.database.execute(\"delete from chat where message = \"\n \"'###new_message###' \"\n \"and frm = '%s'\" % user)\n self.database.commit()\n except sqlite3.IntegrityError:\n pass",
"def remove_user(username, steamid, discorduser=\"\"):\n\tpass",
"def remove_sender(self, sender):\n for o in make_iter(sender):\n try:\n self.senders.remove(o)\n except ValueError:\n pass # nothing to remove",
"def delete_user(self, username):\n if not self.get_user(username):\n print(f\"user {username} not found!\")\n return False\n \n # remove user.\n index = 0\n for user in self.data.get('users'):\n if user.get('username') == username:\n del self.data['users'][index]\n self.file.write(self.data)\n return True\n index += 1\n \n return False",
"def delete_stream(self, stream):\n with self.stream_set.open() as streams:\n self.delete_blocks(stream)\n del streams[stream.key()]\n stream.exists = False",
"async def voicewatch_remove(self, ctx, member: discord.Member):\n config = self.bot.db['super_voicewatch'].setdefault(str(ctx.guild.id), {'users': [], 'channel': ctx.channel.id})\n try:\n config['users'].remove(member.id)\n except ValueError:\n await ctx.send(\"That user was not in the watchlist.\")\n return\n await ctx.send(f\"Removed `{member.name} ({member.id})` from the super voice watchlist.\")\n await hf.dump_json()",
"def remove_stream_key(self, account_name):\n\n for stream_id in self.stream_ids:\n if account_name == stream_id[\"account_name\"]:\n self.stream_ids.remove(stream_id) # for removing object from list\n self.__db_upsert(force_insert=True)\n os.remove(stream_id[\"key_file\"])\n\n return True\n return False",
"def delete_user_by_username(self, userName):\n return self.make_request(\"/users/{0}\".format(userName), method='DELETE')",
"def handler_delete_user(sender, instance, **kwargs):\n rocket_admin = RocketChat(\n settings.ROCKETCHAT_USER,\n settings.ROCKETCHAT_PASSWORD\n )\n rocket_admin.users_delete(instance.chat.chat_user_id)",
"def remove_website_stream(self, w_id, stream_acc_name):\n\n for website in self.websites:\n if w_id == website.id:\n website.remove_stream_key(stream_acc_name)\n return True\n return False",
"def remove_sender(self, senders):\n if isinstance(senders, str):\n self.db_sender_external = \"\"\n self.save(update_fields=[\"db_sender_external\"])\n return\n\n for sender in make_iter(senders):\n if not sender:\n continue\n if not hasattr(sender, \"__dbclass__\"):\n raise ValueError(\"This is a not a typeclassed object!\")\n clsname = sender.__dbclass__.__name__\n if clsname == \"ObjectDB\":\n self.db_sender_objects.remove(sender)\n elif clsname == \"AccountDB\":\n self.db_sender_accounts.remove(sender)\n elif clsname == \"ScriptDB\":\n self.db_sender_accounts.remove(sender)",
"def on_delete(req, resp, team, roster, user):\n team, roster = unquote(team), unquote(roster)\n check_team_auth(team, req)\n connection = db.connect()\n cursor = connection.cursor()\n\n cursor.execute('''DELETE FROM `roster_user`\n WHERE `roster_id`=(\n SELECT `roster`.`id` FROM `roster`\n JOIN `team` ON `team`.`id`=`roster`.`team_id`\n WHERE `team`.`name`=%s AND `roster`.`name`=%s)\n AND `user_id`=(SELECT `id` FROM `user` WHERE `name`=%s)''',\n (team, roster, user))\n deleted = cursor.rowcount\n if deleted == 0:\n raise HTTPNotFound()\n create_audit({'roster': roster, 'user': user}, team, ROSTER_USER_DELETED, req, cursor)\n\n # Remove user from the team if needed\n query = '''DELETE FROM `team_user` WHERE `user_id` = (SELECT `id` FROM `user` WHERE `name`=%s) AND `user_id` NOT IN\n (SELECT `roster_user`.`user_id`\n FROM `roster_user` JOIN `roster` ON `roster`.`id` = `roster_user`.`roster_id`\n WHERE team_id = (SELECT `id` FROM `team` WHERE `name`=%s)\n UNION\n (SELECT `user_id` FROM `team_admin`\n WHERE `team_id` = (SELECT `id` FROM `team` WHERE `name`=%s)))\n AND `team_user`.`team_id` = (SELECT `id` FROM `team` WHERE `name` = %s)'''\n cursor.execute(query, (user, team, team, team))\n if cursor.rowcount != 0:\n unsubscribe_notifications(team, user, cursor)\n connection.commit()\n cursor.close()\n connection.close()",
"def user_remove(self, username):\n return self.delete('rest/api/2/user?username={0}'.format(username))",
"def clear_stream_access_level_user(\n self, username, streamname, databasename=\"_system\"\n ):\n\n request = Request(\n method=\"delete\",\n endpoint=\"/user/{}/database/{}/stream/{}\".format(\n username, databasename, streamname\n ),\n )\n\n def response_handler(resp):\n if not resp.is_success:\n raise ClearStreamAccessLevel(resp, request)\n else:\n if resp.body[\"error\"] is False:\n return True\n elif resp.body[\"error\"] is True:\n return False\n\n return self._execute(request, response_handler, custom_prefix=\"/_api\")",
"def delete_players(self):\n self.cur.execute(\"delete from participants where t_id = %s;\",\n (self.t_id, ))\n self.cur.execute(\"commit;\")",
"def delete_user(self):\n \n User.user_list.remove(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get LaTeX diffs of 'file' (actual copy with its youngest repository version)
|
def LaTeXDiff(target=None, source=None, env=None):
    print('SOURCES :', source)
    print('TARGET :', target)
    do_rev = env['DOREV']
    if do_rev:
        #
        # The command below seems not to work with me :-(
        # svn diff -r [REV] $SOURCEFILE | patch -R -p0 -o $TARGET
        # What follows is more secure but involved :-(
        #
        repo_dir = env['REPODIR']
        #
        # --------------- Get version ---------------
        print('DDCR: git_version')
        #
        # --------------- retrieve file from SVN and do latexdiff ---------------
    return None
|
[
"def git_diff(filepath, since):\n html_diff = None\n commits = git_commits(filepath, since)\n if commits:\n cmd = ('git', '--no-pager', 'diff', commits[-1]+'^', '--',\n filepath)\n stdout, stderr = execute(cmd)\n\n if stdout:\n html_diff = highlight(stdout, lexers.DiffLexer(),\n HtmlFormatter())\n\n # print(' '.join(cmd))\n # print(diff)\n # print('\\n')\n\n return html_diff",
"def _diff(self, version: Version) -> str:\n temp_stdout = StringIO()\n\n with redirect_stdout(temp_stdout):\n dircmp(str(self.directory),\n str(self.tabasco_directory.joinpath(version.name)),\n ignore=[\".\", \"..\", \".tbsc\"]).report()\n\n # Tab the results and ignore the first line (header)\n diff = temp_stdout.getvalue()\n return \"\\n\".join([\"\\t\" + line for line in diff.splitlines()][1:])",
"def get_diff_text(old: Sequence[str], new: Sequence[str], filename: str) -> str:\n newline = \"\\n\"\n diff = difflib.unified_diff(\n old,\n new,\n \"original/\" + filename,\n \"fixed/\" + filename,\n lineterm=newline,\n )\n\n text = \"\"\n for line in diff:\n text += line\n\n # Work around missing newline (http://bugs.python.org/issue2142).\n if not line.endswith(newline):\n text += newline + r\"\\ No newline at end of file\" + newline\n\n return text",
"def _extract_file_diff(self, diff_std_out, filename):\n patch_str = \"\"\n in_our_patch = False\n\n for line in diff_std_out.splitlines(False):\n if self._start_of_patch(line, filename):\n in_our_patch = True\n else:\n in_our_patch = False\n\n if in_our_patch:\n patch_str += line + \"\\n\"\n\n return patch_str",
"def get_diff(old_text, new_text, site_url):\n print (\"Get diff...\")\n \n all_lines = []\n for line in difflib.unified_diff(old_text.splitlines(), new_text.splitlines(), n=0):\n all_lines.append(line)\n\n if all_lines == []:\n return None\n\n html = make_html(all_lines, site_url)\n return html",
"def latexdiff(files1, files2):\n if not which('latexdiff'):\n _missing_diff_program('latexdiff')\n\n if isinstance(files1, str):\n files1 = [files1]\n if isinstance(files2, str):\n files2 = [files2]\n\n for fromfile, tofile in zip(files1, files2):\n\n if fromfile.endswith('.do.txt'):\n basename = fromfile[:-7]\n failure1 = os.system('doconce format pdflatex %s' % basename)\n failure2 = os.system('doconce ptex2tex %s' % basename)\n fromfile = basename + '.tex'\n\n if tofile.endswith('.do.txt'):\n basename = tofile[:-7]\n failure1 = os.system('doconce format pdflatex %s' % basename)\n failure2 = os.system('doconce ptex2tex %s' % basename)\n tofile = basename + '.tex'\n\n diff_file = 'tmp_diff_%s.tex' % tofile\n failure = os.system('latexdiff %s %s > %s' %\n (fromfile, tofile, diff_file))\n failure = os.system('pdflatex %s' % diff_file)\n size = os.path.getsize(diff_file)\n if size > 4:\n print 'output in', diff_file[:-3] + 'pdf'",
"def revision_diff(self,src_ext,revision):\n repo_base = \"%s/%s\" % (self.base_url,src_ext)\n if self.verbose:\n self.log.info(\"(%s)\\n%s\" % (inspect.stack()[0][3],revision))\n try:\n revision_diff = self.client.diff(self.workspace, repo_base,\n revision1=pysvn.Revision(pysvn.opt_revision_kind.number,int(revision) -1),\n revision2=pysvn.Revision(pysvn.opt_revision_kind.number,int(revision)))\n return revision_diff\n except Exception as e:\n self.log.error(e)\n return \"\"",
"def original_content(self, filename):\n return gitinfo.revision_content(gitinfo.current_commit(), filename)",
"def git_diff(*args):\n command = (['git', '--no-pager', 'diff'] + list(args) + [\n '--', 'requirements.txt', 'misc/requirements/requirements-*.txt'])\n proc = subprocess.run(command,\n stdout=subprocess.PIPE,\n encoding='utf-8',\n check=True)\n return proc.stdout.splitlines()",
"def filediff(web):\n fctx, ctx = None, None\n try:\n fctx = webutil.filectx(web.repo, web.req)\n except LookupError:\n ctx = webutil.changectx(web.repo, web.req)\n path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])\n if path not in ctx.files():\n raise\n\n if fctx is not None:\n path = fctx.path()\n ctx = fctx.changectx()\n basectx = ctx.p1()\n\n style = web.config(b'web', b'style')\n if b'style' in web.req.qsparams:\n style = web.req.qsparams[b'style']\n\n diffs = webutil.diffs(web, ctx, basectx, [path], style)\n if fctx is not None:\n rename = webutil.renamelink(fctx)\n ctx = fctx\n else:\n rename = templateutil.mappinglist([])\n ctx = ctx\n\n return web.sendtemplate(\n b'filediff',\n file=path,\n symrev=webutil.symrevorshortnode(web.req, ctx),\n rename=rename,\n diff=diffs,\n **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))\n )",
"def test_with_multi_commit_diff(self):\n reader = DiffXReader(io.BytesIO(\n b'#diffx: encoding=utf-8, version=1.0\\n'\n b'#.change:\\n'\n b'#..preamble: indent=4, length=49, mimetype=text/markdown\\n'\n b' Summary of the _first_ commit in the series.\\n'\n b'#..meta: format=json, length=244\\n'\n b'{\\n'\n b' \"author\": \"Test User <test@example.com>\",\\n'\n b' \"committer\": \"Test User <test@example.com>\",\\n'\n b' \"committer date\": \"2021-06-02T13:12:06-07:00\",\\n'\n b' \"date\": \"2021-06-01T19:26:31-07:00\",\\n'\n b' \"id\": \"a25e7b28af5e3184946068f432122c68c1a30b23\"\\n'\n b'}\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file1\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef\",\\n'\n b' \"old\": \"c8839177d1a5605aa60abe69db95c84183f0eebe\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=60\\n'\n b'--- /file1\\n'\n b'+++ /file1\\n'\n b'@@ -498,7 +498,7 @@\\n'\n b' ... diff content\\n'\n b'#.change:\\n'\n b'#..preamble: indent=4, length=52\\n'\n b' Summary of commit #2\\n'\n b'\\n'\n b' Here\\'s a description.\\n'\n b'#..meta: format=json, length=244\\n'\n b'{\\n'\n b' \"author\": \"Test User <test@example.com>\",\\n'\n b' \"committer\": \"Test User <test@example.com>\",\\n'\n b' \"committer date\": \"2021-06-02T19:46:25-07:00\",\\n'\n b' \"date\": \"2021-06-01T19:46:22-07:00\",\\n'\n b' \"id\": \"91127b687f583184144161f432222748c1a30b23\"\\n'\n b'}\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file2\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"a2ccb0cb48383472345d41a32afde39a7e6a72dd\",\\n'\n b' \"old\": \"1b7af7f97076effed5db722afe31c993e6adbc78\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=80\\n'\n b'--- a/file2\\n'\n b'+++ b/file2\\n'\n b'@@ -66,7 +66,8 @@\\n'\n b' ... diff content for commit 2, file2\\n'\n b'#..file:\\n'\n b'#...meta: format=json, length=166\\n'\n b'{\\n'\n b' \"path\": \"file3\",\\n'\n b' \"revision\": {\\n'\n b' \"new\": \"0d4a0fb8d62b762a26e13591d06d93d79d61102f\",\\n'\n b' \"old\": \"be089b7197974703c83682088a068bef3422c6c2\"\\n'\n b' }\\n'\n b'}\\n'\n b'#...diff: length=82\\n'\n b'--- a/file3\\n'\n b'+++ b/file3\\n'\n b'@@ -258,7 +258,8 @@\\n'\n b' ... 
diff content for commit 2, file3\\n'\n ))\n\n self.assertEqual(list(reader), [\n {\n 'level': 0,\n 'line': 0,\n 'options': {\n 'encoding': 'utf-8',\n 'version': '1.0',\n },\n 'section': Section.MAIN,\n 'type': 'diffx',\n },\n {\n 'level': 1,\n 'line': 1,\n 'options': {},\n 'section': Section.CHANGE,\n 'type': 'change',\n },\n {\n 'level': 2,\n 'line': 2,\n 'options': {\n 'indent': 4,\n 'length': 49,\n 'mimetype': 'text/markdown',\n },\n 'section': Section.CHANGE_PREAMBLE,\n 'text': 'Summary of the _first_ commit in the series.\\n',\n 'type': 'preamble',\n },\n {\n 'level': 2,\n 'line': 4,\n 'metadata': {\n 'author': 'Test User <test@example.com>',\n 'committer': 'Test User <test@example.com>',\n 'committer date': '2021-06-02T13:12:06-07:00',\n 'date': '2021-06-01T19:26:31-07:00',\n 'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',\n },\n 'options': {\n 'format': 'json',\n 'length': 244,\n },\n 'section': Section.CHANGE_META,\n 'type': 'meta',\n },\n {\n 'level': 2,\n 'line': 12,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 13,\n 'metadata': {\n 'path': 'file1',\n 'revision': {\n 'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',\n 'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 21,\n 'options': {\n 'length': 60,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- /file1\\n'\n b'+++ /file1\\n'\n b'@@ -498,7 +498,7 @@\\n'\n b' ... diff content\\n'\n ),\n 'type': 'diff',\n },\n {\n 'level': 1,\n 'line': 26,\n 'options': {},\n 'section': Section.CHANGE,\n 'type': 'change',\n },\n {\n 'level': 2,\n 'line': 27,\n 'options': {\n 'indent': 4,\n 'length': 52,\n },\n 'section': Section.CHANGE_PREAMBLE,\n 'text': (\n \"Summary of commit #2\\n\"\n \"\\n\"\n \"Here's a description.\\n\"\n ),\n 'type': 'preamble',\n },\n {\n 'level': 2,\n 'line': 31,\n 'metadata': {\n 'author': 'Test User <test@example.com>',\n 'committer': 'Test User <test@example.com>',\n 'committer date': '2021-06-02T19:46:25-07:00',\n 'date': '2021-06-01T19:46:22-07:00',\n 'id': '91127b687f583184144161f432222748c1a30b23',\n },\n 'options': {\n 'format': 'json',\n 'length': 244,\n },\n 'section': Section.CHANGE_META,\n 'type': 'meta',\n },\n {\n 'level': 2,\n 'line': 39,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 40,\n 'metadata': {\n 'path': 'file2',\n 'revision': {\n 'new': 'a2ccb0cb48383472345d41a32afde39a7e6a72dd',\n 'old': '1b7af7f97076effed5db722afe31c993e6adbc78',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 48,\n 'options': {\n 'length': 80,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- a/file2\\n'\n b'+++ b/file2\\n'\n b'@@ -66,7 +66,8 @@\\n'\n b' ... 
diff content for commit 2, file2\\n'\n ),\n 'type': 'diff',\n },\n {\n 'level': 2,\n 'line': 53,\n 'options': {},\n 'section': Section.FILE,\n 'type': 'file',\n },\n {\n 'level': 3,\n 'line': 54,\n 'metadata': {\n 'path': 'file3',\n 'revision': {\n 'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',\n 'old': 'be089b7197974703c83682088a068bef3422c6c2',\n },\n },\n 'options': {\n 'format': 'json',\n 'length': 166,\n },\n 'section': Section.FILE_META,\n 'type': 'meta',\n },\n {\n 'level': 3,\n 'line': 62,\n 'options': {\n 'length': 82,\n },\n 'section': Section.FILE_DIFF,\n 'diff': (\n b'--- a/file3\\n'\n b'+++ b/file3\\n'\n b'@@ -258,7 +258,8 @@\\n'\n b' ... diff content for commit 2, file3\\n'\n ),\n 'type': 'diff',\n },\n ])",
"def get_comit_difference(repo_path,c_hash):\n \n cdiff = []\n s = subprocess.check_output(\"cd %s; git log --stat -2 %s \" % (repo_path,c_hash), shell=True)\n \n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\\n(.*?)\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n cdiff.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip(), file_name=m[4].strip(), changes=m[5].strip()))\n \n\n\n return cdiff",
"def get_difference_between_revisions(revision_one,timestamp_one,revision_two,timestamp_two):\r\n difference_holder = []\r\n \r\n added_text_holder = []\r\n \r\n removed_text_holder = []\r\n \r\n url = \"https://en.wikipedia.org/w/api.php?action=compare&format=json&fromrev=\" + revision_one +\"&torev=\" + revision_two\r\n \r\n response = urllib.request.urlopen(url).read() \r\n \r\n link_info = (response.decode('utf-8'))\r\n \r\n j = json.loads(link_info)\r\n \r\n com = j[\"compare\"]['*']\r\n \r\n soup = BeautifulSoup(com,'lxml')\r\n \r\n \r\n lister = soup.find_all('td')\r\n \r\n lsz_added = map(str,lister)\r\n \r\n lsz_removed = map(str,lister)\r\n \r\n indices_two = [i for i, text in enumerate(lsz_removed) if 'deletedline' in text]\r\n \r\n indices = [i for i, text in enumerate(lsz_added) if 'addedline' in text]\r\n \r\n for added_text in indices:\r\n if lister[added_text].get_text() in added_text_holder:\r\n break\r\n else:\r\n if lister[added_text].get_text() != \"\":\r\n edited_text = lister[added_text].get_text().split(\",\")\r\n fixed_added_text = \" \".join(edited_text)\r\n added_text_holder.append(revision_one + \"sez\" + timestamp_one + \"sez\" + revision_two + \"sez\" + timestamp_two + \"sez\" +\"added text: \" +fixed_added_text)\r\n \r\n \r\n for deleted_text in indices_two:\r\n if lister[deleted_text].get_text() in removed_text_holder:\r\n break\r\n else:\r\n if lister[deleted_text].get_text() != \"\":\r\n edited_text = lister[deleted_text].get_text().split(\",\")\r\n fixed_deleted_text = \" \".join(edited_text) \r\n removed_text_holder.append(revision_one + \"sez\" + timestamp_one + \"sez\" + revision_two + \"sez\" + timestamp_two + \"sez\" +\"removed text: \" + fixed_deleted_text) \r\n \r\n difference_holder.append(added_text_holder)\r\n difference_holder.append(removed_text_holder)\r\n \r\n return difference_holder",
"def get_commit_difference(repo_path):\n \n diff = []\n s = subprocess.check_output(\"cd %s; git log --stat \" % repo_path, shell=True)\n r = re.compile(\"commit (.*?)\\n.*?Author: (.*?)\\n.*?Date:(.*?)\\n\\n(.*?)\\n\\n(.*?)\\n(.*?)\\n\", re.M+re.S+re.U+re.I)\n matches = r.findall(s)\n for m in matches:\n diff.append(dict(commit_hash=m[0].strip(), author=m[1].strip(), datetime=m[2].strip(), message=m[3].strip(), file_name=m[4].strip(), changes=m[5].strip()))\n #diff.append(dict(commit_diff=m[0].strip()))\n\n\n return diff",
"def git_diff_pipeline(output_name, master_path, prev_commit):\n log = logging.getLogger(__name__)\n\n current_path = inline_current(master_path)\n log.debug(\"current_path {0}\".format(current_path))\n prev_path = inline_prev(prev_commit, master_path)\n log.debug(\"prev_path {0}\".format(prev_path))\n\n # Run latexmk\n diff_path = os.path.splitext(output_name)[0]\n ldiff_cmd = \"latexdiff --type=CTRADITIONAL {prev} {current} > {diff}.tex\".\\\n format(prev=prev_path, current=current_path, diff=diff_path)\n subprocess.call(ldiff_cmd, shell=True)\n\n # Compile the diff document with latexmk\n ltmk_cmd = \"latexmk -f -pdf -bibtex-cond {0}.tex\".format(diff_path)\n subprocess.call(ltmk_cmd, shell=True)\n\n # Copy to build directory\n if not os.path.exists(\"build\"):\n os.makedirs(\"build\")\n pdf_path = \"{0}.pdf\".format(output_name)\n if os.path.exists(pdf_path):\n shutil.move(pdf_path, os.path.join(\"build\", pdf_path))\n\n # Clean up\n ltmk_cmd = \"latexmk -f -pdf -bibtex-cond -c {0}.tex\".format(diff_path)\n subprocess.call(ltmk_cmd, shell=True)\n build_exts = ['Notes.bib', '.bbl', '.tex']\n for ext in build_exts:\n path = \"\".join((output_name, ext))\n if os.path.exists(path):\n os.remove(path)\n os.remove(prev_path)\n os.remove(current_path)",
"async def get_file_diff(self, pr_id: str, filename: str) -> Dict[str, str]:\n pull_request = await self._get_pull_requests(pr_id)\n\n base_content = await self.__get_content(\n pull_request[\"base\"][\"repo\"][\"url\"], filename, pull_request[\"base\"][\"sha\"]\n )\n head_content = await self.__get_content(\n pull_request[\"head\"][\"repo\"][\"url\"], filename, pull_request[\"head\"][\"sha\"]\n )\n\n return {\n \"base\": {\n \"label\": pull_request[\"base\"][\"label\"],\n \"sha\": pull_request[\"base\"][\"sha\"],\n \"content\": base_content,\n },\n \"head\": {\n \"label\": pull_request[\"head\"][\"label\"],\n \"sha\": pull_request[\"head\"][\"sha\"],\n \"content\": head_content,\n },\n }",
"def get_diff(scmtool, repository_info, revision_range=None,\n svn_changelist=None, files=[]):\n if revision_range:\n diff_info = scmtool.diff_between_revisions(\n revision_range,\n files,\n repository_info)\n elif svn_changelist:\n diff_info = scmtool.diff_changelist(svn_changelist)\n else:\n diff_info = scmtool.diff(files)\n\n # Support compatibility with diff functions that haven't been updated\n # to return a dictionary.\n if isinstance(diff_info, tuple):\n diff_info = {\n 'diff': diff_info[0],\n 'parent_diff': diff_info[1],\n 'base_commit_id': None,\n }\n\n return diff_info",
"def build_diff(self):\n return self._main_vcs.merged_diff(self.base_revision,\n self.arguments.new_revision,\n self.arguments.unified_lines)",
"def create_visual_diff_through_html_files(file1, file2, encoding=\"utf8\", page=None,\n browser=False, notebook=False, context_size=None,\n inline_view=False):\n diff = create_visual_diff_through_html(file1, file2, notebook=notebook,\n context_size=context_size, inline_view=inline_view)\n if page is not None:\n with open(page, \"w\", encoding=\"utf8\") as f:\n f.write(diff)\n if browser: # pragma: no cover\n if page is None:\n raise AttributeError(\"browser is True, page must be True\")\n import webbrowser\n webbrowser.open(page)\n return None\n return diff"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
We return a dataset or datapoint with all attributes that have the given role
|
def __getattr__(
self, role: str
) -> Union["algoneer.dataset.Dataset", "algoneer.dataset.Attribute"]:
relevant_columns: List[str] = []
for attribute in self._obj.attributes.values():
if role in attribute.roles:
relevant_columns.append(attribute.column)
return self._obj[relevant_columns]
|
[
"def data(self, column, role):\n return self.columns[column](self._project, role)",
"def filter_granted(self, queryset):\n return Dataset.filter_by_user(self.request.user)",
"def data(self, column, role):\n return self.columns[column](self._user, role)",
"def skillsRelevantForRole(self, role: str) -> dict:\n\n\t\tresult = {}\n\t\tfor skill in self:\n\t\t\tif not hasattr(skill, \"roles\"): continue\n\t\t\tfor (curRole, relevance) in skill.roles:\n\t\t\t\tif curRole == role:\n\t\t\t\t\tresult[skill.id] = relevance\n\t\treturn result",
"def get_dataset(self):\n return Adviser.objects.values(\n 'id',\n 'date_joined',\n 'first_name',\n 'last_login',\n 'last_name',\n 'telephone_number',\n 'contact_email',\n 'dit_team_id',\n 'is_active',\n 'sso_email_user_id',\n )",
"def Attributes(self) -> EPlotAttributeCollection:",
"def attribution(self, entity, agent, role=None, identifier=None,\n other_attributes=None):\n if other_attributes is None:\n other_attributes = {}\n if role is not None:\n other_attributes.update({VOPROV_ATTR_ROLE: role})\n if len(other_attributes) == 0:\n other_attributes = None\n return self.new_record(\n VOPROV_ATTRIBUTION, identifier, {\n VOPROV_ATTR_ENTITY: entity,\n VOPROV_ATTR_AGENT: agent\n },\n other_attributes\n )",
"def get_raw_data(self, dataset: SampleTypeEnum, *attributes: str) \\\n -> List[Tuple]:\n sample: List[Series] = self._sample.get_data(dataset)\n\n res: List[tuple] \\\n = [(*self._filter_row(row, attributes),) for row in sample]\n\n return res",
"async def whohas(self, ctx, *, role: str):\n\n role = await helpers.role_by_substring(ctx, role)\n\n members_with_role = []\n for member in ctx.guild.members:\n if role in member.roles:\n members_with_role.append(member.mention)\n if not members_with_role:\n await ctx.send(\"Nobody has that role. :<\")\n else:\n embed = discord.Embed(title=f\"Members with {role.name}: {len(members_with_role)}\")\n embed.description = \", \".join(members_with_role[:30])\n if len(members_with_role) > 30:\n embed.set_footer(text=f\"...and {len(members_with_role)-30} others.\")\n await ctx.send(embed=embed)",
"def where_attr(self, attr, dtype):\n return np.array([self._hfile[\"where\"].attrs[attr]], dtype=dtype)",
"def get_result(self, role):\n return self.connection.assume_role_with_saml(role.role_arn, role.principal_arn, self.assertion, duration_seconds=3600)",
"def get_abilities(self, role: Role) -> AbilityList:\n\n return AbilityList(\n abilities=[ABILITIES.get_ability_by_key(key) for key in self.data.overview_data(\"world\", self.current_queue.rank, role)[\"abilities\"][\"ability_order\"]]\n )",
"def __getitem__(self, given):\n return self.dataset[given]",
"def get_permissions_of_role(rolename):\n role_uuid = get_all_permissions(rolename)\n URL = f'{APIVERSION}/roles/{role_uuid}/permissions'\n c = start_handler()\n c.setopt(c.WRITEDATA, BUFFER)\n perform_request(c, URL)\n body = json.loads(BUFFER.getvalue())\n for output in body['data']:\n for key, value in output.items():\n if 'attributes' in key:\n print(value['name'])",
"def get_attractions():\n from entity import Attraction\n rows = read_csv(\"data/attractions.csv\")\n return (Attraction(*row) for row in rows[1:])",
"def getAttributePermissionInfo( self, state_id, attr_id, p ):\n roles = None\n perm = None\n if self.state_attr_permission_roles:\n perm_def = self.state_attr_permission_roles.get( (state_id, attr_id), {} )\n roles = perm_def.get(p, None)\n if roles is None:\n return {'acquired':1, 'roles':[]}\n else:\n if type(roles) is TupleType:\n acq = 0\n else:\n acq = 1\n return {'acquired':acq, 'roles':list(roles)}",
"def get_dataset(self):\n linear_qs = AminoAcid.objects.filter(amino_acid__in=self.linear)\\\n .values_list('data__linear_smile')\n methylated_qs = AminoAcid.objects.filter(amino_acid__in=self.methylated)\\\n .values_list('data__methylated_smile')\n linear_dataset = list(map(lambda x: x[0], linear_qs))\n methylated_dataset = list(map(lambda x: x[0], methylated_qs))\n dataset = linear_dataset + methylated_dataset\n return dataset",
"def test_get_role_associates(self):\n pass",
"async def roleinfo(self, ctx, *, role: str):\n\n role = await helpers.role_by_substring(ctx, role)\n\n embed = discord.Embed(title=role.name)\n embed.colour = role.color\n embed.description = f\"{role.id} | Members: {len(role.members)}\"\n embed.add_field(name=\"Color\", value=f\"{role.color}\", inline=False)\n\n if role.permissions.administrator:\n embed.add_field(name=\"Administrator\", value=True)\n\n else:\n paginator = commands.Paginator(prefix=\"\", suffix=\"\")\n\n for permission, value in role.permissions:\n if value:\n paginator.add_line(str(permission).capitalize().replace(\"_\", \" \"))\n\n for page in paginator.pages:\n embed.add_field(name=\"Permissions\", value=page)\n\n await ctx.send(embed=embed)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Randomly sample datapoints without labels.
|
def sample_unlabeled_data(n_samples):
gen = ToyDataGenerator()
xs, _ = gen.generate_uniform_dataset(n_samples)
return xs
|
[
"def get_random_datapoint(self):\n random_datapoint = None\n if self.missing_label_placeholder is not None:\n random_datapoint = np.random.choice(\n len(self.y_[self.labeled_indices_]))\n else:\n random_datapoint = np.random.randint(low=0, high=len(self.y_))\n return random_datapoint",
"def sample(X,Y):\n sampledX = []\n sampledY = []\n nullPos = []\n\n # get all indices of 'O'-labeled data\n nonNullNum = 0\n for pos in range(len(Y)):\n label = Y[pos]\n if (getLabelIndex('O') == label):\n nullPos.append(pos)\n else:\n nonNullNum += 1\n \n # randomly sample part of 'O'-labeled data\n num = nonNullNum / 20 + 1\n sampledNullPos = random.sample(nullPos, num)\n\n for pos in range(len(Y)):\n if ((pos in nullPos) and (pos not in sampledNullPos)):\n continue\n else:\n feature = X[pos]\n label = Y[pos]\n sampledX.append(feature)\n sampledY.append(label)\n #print sampledY\n return sampledX, sampledY",
"def labeled_uniform_sample(self, sample_size, replace=True):\n idx = np.random.choice(np.where(self.get_labeled_mask())[0],\n size=sample_size, replace=replace )\n return Dataset(self._X[idx], self._y[idx])",
"def _get_random_sample_from_class(self, label):\n class_indexes = np.where(self.labels == label)[0]\n CLASS_INDEX = np.random.choice(class_indexes, 1)[0]\n\n return self.samples[CLASS_INDEX : CLASS_INDEX + 1]",
"def sample_labelled(train, train_label, size, classes):\n label_size = size / classes\n\n random_index = range(train.shape[0])\n random.shuffle(random_index)\n\n train, train_label = train[random_index], train_label[random_index]\n\n labelled_indexes = []\n label_indexes = {}\n for index, label in enumerate(train_label):\n label_indexes.setdefault(label, [])\n label_indexes[label].append(index)\n\n for label, indexes in label_indexes.items():\n labelled_indexes += indexes[:label_size]\n\n unlabelled_indexes = list(set(random_index) - set(labelled_indexes))\n\n X_labelled = train[labelled_indexes]\n Y_labelled = train_label[labelled_indexes]\n X_unlabelled = train[unlabelled_indexes]\n Y_unlabelled = train_label[unlabelled_indexes]\n return X_labelled, Y_labelled, X_unlabelled, Y_unlabelled",
"def randomSamples(feature, img, numPoints, seed, classBand, classValues, classPoints, scale = 30):\n img = img.addBands(ee.Image.pixelLonLat())\n \n points = img.stratifiedSample(\n numPoints = numPoints,\n classBand = classBand, \n region = feature,\n seed = seed,\n classValues = classValues, # valores a serem classificados \n classPoints = classPoints, \n dropNulls = True, \n scale = scale\n )\n \n points = points.randomColumn('randCol', 0)\n\n return points.map(setGeometry)",
"def predictive_sample(self, x, n=1000):\n samples = []\n with Sampling(n=n, flipout=False):\n for x_data, y_data in make_generator(x, test=True):\n samples += [self(O.expand_dims(x_data, 0)).sample()]\n return np.concatenate(samples, axis=1)",
"def stratified_random_resampling(combined_labels, y_label, sampling_method, seed_val):\r\n \r\n #Applies random sampling\r\n random.seed(seed_val)\r\n\r\n \r\n #Merges y_label into a single list to perform undersampling altogether\r\n \r\n combined_labels = combined_labels + [y_label]\r\n \r\n #Determine the number of y_labels\r\n label_val = np.unique(y_label).tolist()\r\n\r\n #Count the number of data in each label\r\n label_count = list()\r\n for i in range(len(label_val)):\r\n label_count.append((y_label == i).sum()) #numpy way of performing .count() function in list format\r\n \r\n #Determine which label has the least count\r\n #******************************\r\n if sampling_method == 'undersampling':\r\n min_max_label = label_count.index(min(label_count))\r\n elif sampling_method == 'oversampling':\r\n min_max_label = label_count.index(max(label_count))\r\n \r\n \r\n #Reorganize the list without the min label count\r\n label_val.remove(min_max_label)\r\n #label_val[min_label] = None\r\n \r\n #Create lists of lists containing label's original index value and its respective labels\r\n \"\"\"\r\n Ex. Suppose we have a y_label = [0,0,1,2,2] that contains 3 different labels\r\n y_label would then be converted into [[0,0], [1,0], [2,1], [3,2], [4,2]] \r\n where the first index within the list is the original index value and the second index\r\n is the y label. This is done to track random.sample() function on which label is randomly selected\r\n \"\"\"\r\n y_label_index = list()\r\n for i in range(len(y_label)):\r\n y_label_index.append([i, y_label[i]])\r\n \r\n #Now separating each of the label into its own lists\r\n list_output = list() #This specific lists output all the labels that need to be removed with its index value\r\n for i in range(len(label_val)):\r\n current_label_list = list()\r\n current_label = label_val[i]\r\n for j in range(len(y_label_index)):\r\n if y_label_index[j][1] == current_label:\r\n current_label_list.append(y_label_index[j])\r\n \r\n\r\n #Specifies how many of the said label needs to be removed based off the min/max label count\r\n if sampling_method == 'undersampling':\r\n target_label_count = label_count[current_label] - label_count[min_max_label]\r\n \r\n #Random sampling within a label without replacement\r\n randomized_list = random.sample(current_label_list, target_label_count) \r\n \r\n elif sampling_method == 'oversampling':\r\n target_label_count = label_count[min_max_label] - label_count[current_label]\r\n \r\n #Random sampling within a label WITH replacement if with replacement option cannot be done\r\n try: \r\n randomized_list = random.sample(current_label_list, target_label_count) \r\n except ValueError:\r\n print('Selected sample is larger than the population, sampling WITH replacement is used for label: ' + str(current_label_list[0][1]))\r\n randomized_list = random.choices(current_label_list, k=target_label_count)\r\n \r\n list_output.append(randomized_list)\r\n\r\n\r\n #---Take the combined_labels and remove each of them based on its index values---\r\n #Combine entire lists into a single list. 
If it is a binary label, then processed_list = list_output\r\n processed_list = list()\r\n for i in range(len(label_val)):\r\n processed_list.extend(list_output[i])\r\n \r\n #The lists must be sorted in reverse order so that when xlabels are removed, it is not affecting its index value\r\n processed_list.sort(reverse = True)\r\n \r\n #Deleting all the available xlabels and ylabels\r\n final_output = list()\r\n for i in range(len(combined_labels)):\r\n target_label = combined_labels[i]\r\n target_label = target_label.tolist()\r\n \r\n if sampling_method == 'undersampling':\r\n for j in tqdm(range(len(processed_list))):\r\n del target_label[processed_list[j][0]]\r\n final_output.append(target_label)\r\n \r\n elif sampling_method == 'oversampling':\r\n for j in tqdm(range(len(processed_list))):\r\n #Insert(index position, insert value)\r\n target_label.insert(processed_list[j][0], target_label[processed_list[j][0]])\r\n final_output.append(target_label)\r\n\r\n #Ouput Summary\r\n print('\\n\\n* Resampling complete * | Method used: ' + str(sampling_method))\r\n print('Original dataset count: ' + str(Counter(y_label)))\r\n \r\n #final_output's last index is always the y_label\r\n y_train_rs = np.array(final_output[len(final_output)-1])\r\n print('Resampled dataset count: ' + str(Counter(y_train_rs)))\r\n \r\n return final_output, list_output",
"def add_noise_and_shuffle(point_cloud, label):\n dev_in_metres = 0.05 # <- change this value to change amount of noise\n # add noise to the points\n point_cloud += tf.random.uniform(point_cloud.shape, -dev_in_metres, dev_in_metres, dtype=tf.float64)\n # shuffle points\n # point_cloud = tf.random.shuffle(point_cloud)\n return point_cloud, label",
"def test_sample_wrong_X():\n\n # Create the object\n ros = RandomOverSampler(random_state=RND_SEED)\n ros.fit(X, Y)\n assert_raises(RuntimeError, ros.sample, np.random.random((100, 40)),\n np.array([0] * 50 + [1] * 50))",
"def test_random_sample_2000(self):\n\t\t#-Load Random Sample From RAW DATASET-#\n\t\tyears = [2000]\n\t\trs = import_csv_as_statatypes(TEST_DATA_DIR+\"nberfeenstra_wtf00_random_sample.csv\") \t\t#random sample\n\t\tdel rs['obs']\n\t\tassert_rows_in_df(df=self.obj.raw_data, rows=rs)\n\t\tassert_unique_rows_in_df(df=self.obj.raw_data, rows=rs)",
"def test_x_data_but_no_y_data(self):\n lead_time = pd.to_timedelta(\"1d\")\n lookback = pd.to_timedelta(\"2y\")\n prediction_period = pd.to_timedelta(\"180d\")\n min_date = pd.to_datetime(\"2018-01-01\")\n max_date = (\n min_date + lookback + lead_time + prediction_period + pd.Timedelta(\"1d\")\n )\n\n sampler = BinnedUniformSampler(\n min_date=min_date,\n max_date=max_date,\n lead_time=lead_time,\n prediction_period=prediction_period,\n samples_per_lookback=1,\n lookback=lookback,\n )\n # purchase that covers one lookback, but no prediction period\n customer_data = self.generate_data_for_one_customer(\n 1, min_date, min_date + lookback, n_orders=12\n )\n # a sampler with max date greater than the maximum order date and\n # params such that exactly one sample is created for the customer\n samples = sampler.generate_samples(customer_data)\n\n assert samples.index.get_level_values(\"sample_id\").nunique() == 1\n assert samples.x_include.sum() >= 1\n assert samples.y_include.sum() == 0",
"def _sample_negative_new(self, pos_items, all_items):\n while True:\n sample = np.random.choice(all_items)\n if sample in pos_items:\n continue\n return sample",
"def sample(self) -> np.ndarray:\n if self._sample_keys is None:\n return np.array([])\n elif self._sample_keys.size > self.limit:\n self._discard_excess_elements()\n return self._sample_elements",
"def test_random_sample_1962(self):\n\t\t#-Load Random Sample From RAW DATASET-#\n\t\tyears = [1962]\n\t\tobj = self.obj\n\t\trs = import_csv_as_statatypes(TEST_DATA_DIR+\"nberfeenstra_wtf62_random_sample.csv\") \t\t#random sample\n\t\tdel rs['obs']\n\t\tassert_rows_in_df(df=self.obj.raw_data, rows=rs)\n\t\tassert_unique_rows_in_df(df=self.obj.raw_data, rows=rs)",
"def test_oversampling_no_replace(base_clumper):\n with pytest.raises(ValueError):\n base_clumper.sample(n=len(base_clumper) + 1, replace=False)",
"def random_data(data):\n df = pd.DataFrame(data)\n df = df.sample(frac=1) #.reset_index(drop=True)\n return np.array(df)",
"def noisy_observations(data_in, avg_obs_per_point, cov, shuffle=False):\n\n\t#init\t\n\tdata_out = []\n\tn_in = len(data_in)\n\tidx = []\n\n\t#generate indeces, set 1 to give exact indeces back\n\tif avg_obs_per_point == 1:\n\t\tidx = np.arange(n_in).tolist()\n\telse:\n\t\tfor i in xrange(n_in):\n\t\t\tn = int(np.random.normal(avg_obs_per_point))\n\t\t\tidx += [i for j in xrange(n)]\n\n\t#shuffle list if desired\n\tif shuffle:\n\t\trandom.shuffle(idx) #operates in place\n\n\t#grab each data point and draw random sample from N(xy,cov)\n\tfor i in idx:\n\t\tdata_out.append({'name':data_in[i]['class'],\n\t\t\t\t\t\t 'data':np.random.multivariate_normal(data_in[i]['xy'],cov)})\n\treturn data_out",
"def StratifiedSample(data, nperlabel):\n sample = pd.DataFrame()\n datagrp = data.groupby('label')\n sortedgrp = datagrp.size().order(ascending=False)\n for i, l in enumerate(sortedgrp.index):\n if sortedgrp[l] > nperlabel:\n print(\"==> %-50s %6d\" % (l, sortedgrp[l]))\n sample = sample.append(RandomSample(data[data['label'] == l],\n nperlabel))\n else:\n break\n print(\"There are %d labels have more than %d articles\" % (i, nperlabel))\n print(\"Sample size: %s articles\" % (len(sample)))\n return sample"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|