code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]
print("")
print("Lesson #2")
print("Program start:")
for i in a:
    if i < 9:
        print(i)
print("End")
|
normal
|
{
"blob_id": "58f7810e2731721562e3459f92684589dc66862c",
"index": 881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('')\nprint('Lesson #2')\nprint('Program start:')\nfor i in a:\n if i < 9:\n print(i)\nprint('End')\n",
"step-3": "a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]\nprint('')\nprint('Lesson #2')\nprint('Program start:')\nfor i in a:\n if i < 9:\n print(i)\nprint('End')\n",
"step-4": "a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]\n\nprint(\"\")\nprint(\"Lesson #2\")\nprint(\"Program start:\")\nfor i in a:\n if i < 9:\n print(i)\nprint(\"End\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import hashlib
import json
#import logger
import Login.loger as logger
#configurations
import Configurations.config as config
def generate_data(*args):
    #add data into seperate variables
    try:
        station_data = args[0]
    except KeyError as e:
        logger.log(log_type=config.log_error,params=e)
        return None
    #extract all variables from data
    """
    There are the Parameters need to be extracted from the packet
    Weather Parameters
    1 - dateist
    2 - dailyrainMM
    3 - rain
    4 - tempc
    5 - winddir
    6 - windspeedkmh
    7 - humidity
    8 - baromMM
    Technical Parameters
    1 - batt
    2 - network
    3 - RSSI
    4 - action
    5 - softwaretype
    6 - version
    """
    data_hashed = dict()
    #data_hashed['dateist']=generate_id('dateist',station_data['station_id'])
    data_hashed['dailyrainMM']=generate_id('dailyrainMM',station_data['station_id'])
    data_hashed['rain']=generate_id('rain',station_data['station_id'])
    data_hashed['tempc']=generate_id('tempc',station_data['station_id'])
    data_hashed['winddir']=generate_id('winddir',station_data['station_id'])
    data_hashed['windspeedkmh']=generate_id('windspeedkmh',station_data['station_id'])
    data_hashed['humidity']=generate_id('humidity',station_data['station_id'])
    data_hashed['baromMM']=generate_id('baromMM',station_data['station_id'])
    data_hashed['BAT']=generate_id('BAT',station_data['station_id'])
    data_hashed['network']=generate_id('network',station_data['station_id'])
    data_hashed['RSSI']=generate_id('RSSI',station_data['station_id'])
    data_hashed['action']=generate_id('action',station_data['station_id'])
    data_hashed['softwareType']=generate_id('softwareType',station_data['station_id'])
    data_hashed['version']=generate_id('version',station_data['station_id'])
    return data_hashed
def generate_id(parameter,station_id):
    meta_data= parameter+station_id
    #generate all the keys for the has ids
    hash_id = hashlib.sha256(config.encryption_key)
    hash_id.update(json.dumps(meta_data).encode())
    return hash_id.hexdigest()
|
normal
|
{
"blob_id": "2a5c6f442e6e6cec6c4663b764c8a9a15aec8c40",
"index": 6971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_id(parameter, station_id):\n meta_data = parameter + station_id\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n",
"step-3": "<mask token>\n\n\ndef generate_data(*args):\n try:\n station_data = args[0]\n except KeyError as e:\n logger.log(log_type=config.log_error, params=e)\n return None\n \"\"\"\n There are the Parameters need to be extracted from the packet\n \n Weather Parameters\n 1 - dateist\n 2 - dailyrainMM\n 3 - rain\n 4 - tempc\n 5 - winddir\n 6 - windspeedkmh\n 7 - humidity\n 8 - baromMM\n\n Technical Parameters\n 1 - batt\n 2 - network\n 3 - RSSI\n 4 - action\n 5 - softwaretype\n 6 - version\n \"\"\"\n data_hashed = dict()\n data_hashed['dailyrainMM'] = generate_id('dailyrainMM', station_data[\n 'station_id'])\n data_hashed['rain'] = generate_id('rain', station_data['station_id'])\n data_hashed['tempc'] = generate_id('tempc', station_data['station_id'])\n data_hashed['winddir'] = generate_id('winddir', station_data['station_id'])\n data_hashed['windspeedkmh'] = generate_id('windspeedkmh', station_data[\n 'station_id'])\n data_hashed['humidity'] = generate_id('humidity', station_data[\n 'station_id'])\n data_hashed['baromMM'] = generate_id('baromMM', station_data['station_id'])\n data_hashed['BAT'] = generate_id('BAT', station_data['station_id'])\n data_hashed['network'] = generate_id('network', station_data['station_id'])\n data_hashed['RSSI'] = generate_id('RSSI', station_data['station_id'])\n data_hashed['action'] = generate_id('action', station_data['station_id'])\n data_hashed['softwareType'] = generate_id('softwareType', station_data[\n 'station_id'])\n data_hashed['version'] = generate_id('version', station_data['station_id'])\n return data_hashed\n\n\ndef generate_id(parameter, station_id):\n meta_data = parameter + station_id\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n",
"step-4": "import hashlib\nimport json\nimport Login.loger as logger\nimport Configurations.config as config\n\n\ndef generate_data(*args):\n try:\n station_data = args[0]\n except KeyError as e:\n logger.log(log_type=config.log_error, params=e)\n return None\n \"\"\"\n There are the Parameters need to be extracted from the packet\n \n Weather Parameters\n 1 - dateist\n 2 - dailyrainMM\n 3 - rain\n 4 - tempc\n 5 - winddir\n 6 - windspeedkmh\n 7 - humidity\n 8 - baromMM\n\n Technical Parameters\n 1 - batt\n 2 - network\n 3 - RSSI\n 4 - action\n 5 - softwaretype\n 6 - version\n \"\"\"\n data_hashed = dict()\n data_hashed['dailyrainMM'] = generate_id('dailyrainMM', station_data[\n 'station_id'])\n data_hashed['rain'] = generate_id('rain', station_data['station_id'])\n data_hashed['tempc'] = generate_id('tempc', station_data['station_id'])\n data_hashed['winddir'] = generate_id('winddir', station_data['station_id'])\n data_hashed['windspeedkmh'] = generate_id('windspeedkmh', station_data[\n 'station_id'])\n data_hashed['humidity'] = generate_id('humidity', station_data[\n 'station_id'])\n data_hashed['baromMM'] = generate_id('baromMM', station_data['station_id'])\n data_hashed['BAT'] = generate_id('BAT', station_data['station_id'])\n data_hashed['network'] = generate_id('network', station_data['station_id'])\n data_hashed['RSSI'] = generate_id('RSSI', station_data['station_id'])\n data_hashed['action'] = generate_id('action', station_data['station_id'])\n data_hashed['softwareType'] = generate_id('softwareType', station_data[\n 'station_id'])\n data_hashed['version'] = generate_id('version', station_data['station_id'])\n return data_hashed\n\n\ndef generate_id(parameter, station_id):\n meta_data = parameter + station_id\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n",
"step-5": "import hashlib\nimport json\n#import logger \nimport Login.loger as logger\n#configurations\nimport Configurations.config as config\n\ndef generate_data(*args):\n #add data into seperate variables\n try:\n station_data = args[0]\n except KeyError as e:\n logger.log(log_type=config.log_error,params=e)\n return None\n #extract all variables from data\n \"\"\"\n There are the Parameters need to be extracted from the packet\n \n Weather Parameters\n 1 - dateist\n 2 - dailyrainMM\n 3 - rain\n 4 - tempc\n 5 - winddir\n 6 - windspeedkmh\n 7 - humidity\n 8 - baromMM\n\n Technical Parameters\n 1 - batt\n 2 - network\n 3 - RSSI\n 4 - action\n 5 - softwaretype\n 6 - version\n \"\"\"\n data_hashed = dict()\n #data_hashed['dateist']=generate_id('dateist',station_data['station_id'])\n data_hashed['dailyrainMM']=generate_id('dailyrainMM',station_data['station_id'])\n data_hashed['rain']=generate_id('rain',station_data['station_id'])\n data_hashed['tempc']=generate_id('tempc',station_data['station_id'])\n data_hashed['winddir']=generate_id('winddir',station_data['station_id'])\n data_hashed['windspeedkmh']=generate_id('windspeedkmh',station_data['station_id'])\n data_hashed['humidity']=generate_id('humidity',station_data['station_id'])\n data_hashed['baromMM']=generate_id('baromMM',station_data['station_id'])\n data_hashed['BAT']=generate_id('BAT',station_data['station_id'])\n data_hashed['network']=generate_id('network',station_data['station_id'])\n data_hashed['RSSI']=generate_id('RSSI',station_data['station_id'])\n data_hashed['action']=generate_id('action',station_data['station_id'])\n data_hashed['softwareType']=generate_id('softwareType',station_data['station_id'])\n data_hashed['version']=generate_id('version',station_data['station_id'])\n return data_hashed \n\n\n\n \ndef generate_id(parameter,station_id):\n meta_data= parameter+station_id\n #generate all the keys for the has ids\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    initial = True
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
    operations = [migrations.CreateModel(name='Session', fields=[('id',
        models.AutoField(auto_created=True, primary_key=True, serialize=
        False, verbose_name='ID')), ('token', models.CharField(editable=
        False, max_length=64, unique=True)), ('description', models.
        CharField(blank=True, max_length=512)), ('created_at', models.
        DateTimeField(auto_now_add=True)), ('last_seen_at', models.
        DateTimeField(blank=True, editable=False, null=True)), ('user',
        models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
        settings.AUTH_USER_MODEL))])]
<|reserved_special_token_1|>
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
    operations = [migrations.CreateModel(name='Session', fields=[('id',
        models.AutoField(auto_created=True, primary_key=True, serialize=
        False, verbose_name='ID')), ('token', models.CharField(editable=
        False, max_length=64, unique=True)), ('description', models.
        CharField(blank=True, max_length=512)), ('created_at', models.
        DateTimeField(auto_now_add=True)), ('last_seen_at', models.
        DateTimeField(blank=True, editable=False, null=True)), ('user',
        models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
        settings.AUTH_USER_MODEL))])]
<|reserved_special_token_1|>
# Generated by Django 2.1.3 on 2019-01-06 06:53
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name="Session",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("token", models.CharField(editable=False, max_length=64, unique=True)),
                ("description", models.CharField(blank=True, max_length=512)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                (
                    "last_seen_at",
                    models.DateTimeField(blank=True, editable=False, null=True),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
    ]
|
flexible
|
{
"blob_id": "a91d42764fa14111afca4551edd6c889903ed9bd",
"index": 8056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Session', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('token', models.CharField(editable=\n False, max_length=64, unique=True)), ('description', models.\n CharField(blank=True, max_length=512)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('last_seen_at', models.\n DateTimeField(blank=True, editable=False, null=True)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-4": "import django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Session', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('token', models.CharField(editable=\n False, max_length=64, unique=True)), ('description', models.\n CharField(blank=True, max_length=512)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('last_seen_at', models.\n DateTimeField(blank=True, editable=False, null=True)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 2.1.3 on 2019-01-06 06:53\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Session\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"token\", models.CharField(editable=False, max_length=64, unique=True)),\n (\"description\", models.CharField(blank=True, max_length=512)),\n (\"created_at\", models.DateTimeField(auto_now_add=True)),\n (\n \"last_seen_at\",\n models.DateTimeField(blank=True, editable=False, null=True),\n ),\n (\n \"user\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ModelTest(TestCase):
    def test_expense_form_valid_data(self):
        form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',
            'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertTrue(form.is_valid())
    <|reserved_special_token_0|>
    def test_expense_form_invalid_required(self):
        form = StudentForm(data={'student_id': 500, 'firstName': '',
            'lastName': '', 'department': '', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors, {'firstName': [
            'This field is required.'], 'lastName': [
            'This field is required.'], 'department': [
            'This field is required.']})
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelTest(TestCase):
    def test_expense_form_valid_data(self):
        form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',
            'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertTrue(form.is_valid())
    def test_expense_form_no_data(self):
        form = StudentForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 8)
    def test_expense_form_invalid_required(self):
        form = StudentForm(data={'student_id': 500, 'firstName': '',
            'lastName': '', 'department': '', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors, {'firstName': [
            'This field is required.'], 'lastName': [
            'This field is required.'], 'department': [
            'This field is required.']})
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelTest(TestCase):
    def test_expense_form_valid_data(self):
        form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',
            'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertTrue(form.is_valid())
    def test_expense_form_no_data(self):
        form = StudentForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 8)
    def test_expense_form_invalid_required(self):
        form = StudentForm(data={'student_id': 500, 'firstName': '',
            'lastName': '', 'department': '', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors, {'firstName': [
            'This field is required.'], 'lastName': [
            'This field is required.'], 'department': [
            'This field is required.']})
    def test_expense_form_invalid_equal_to_max(self):
        form = StudentForm(data={'student_id': 120000, 'firstName':
            'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',
            'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,
            'biologyScore': 101})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 5)
        self.assertEqual(form.errors, {'student_id': [
            'Ensure this value is less than or equal to 9999.'],
            'mathScore': ['Ensure this value is less than or equal to 100.'
            ], 'physicsScore': [
            'Ensure this value is less than or equal to 100.'],
            'chemistryScore': [
            'Ensure this value is less than or equal to 100.'],
            'biologyScore': [
            'Ensure this value is less than or equal to 100.']})
<|reserved_special_token_1|>
from django.test import TestCase
from student.forms import StudentForm
class ModelTest(TestCase):
    def test_expense_form_valid_data(self):
        form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',
            'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertTrue(form.is_valid())
    def test_expense_form_no_data(self):
        form = StudentForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 8)
    def test_expense_form_invalid_required(self):
        form = StudentForm(data={'student_id': 500, 'firstName': '',
            'lastName': '', 'department': '', 'mathScore': 100,
            'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors, {'firstName': [
            'This field is required.'], 'lastName': [
            'This field is required.'], 'department': [
            'This field is required.']})
    def test_expense_form_invalid_equal_to_max(self):
        form = StudentForm(data={'student_id': 120000, 'firstName':
            'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',
            'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,
            'biologyScore': 101})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 5)
        self.assertEqual(form.errors, {'student_id': [
            'Ensure this value is less than or equal to 9999.'],
            'mathScore': ['Ensure this value is less than or equal to 100.'
            ], 'physicsScore': [
            'Ensure this value is less than or equal to 100.'],
            'chemistryScore': [
            'Ensure this value is less than or equal to 100.'],
            'biologyScore': [
            'Ensure this value is less than or equal to 100.']})
<|reserved_special_token_1|>
from django.test import TestCase
from student.forms import StudentForm
class ModelTest(TestCase):
    def test_expense_form_valid_data(self):
        form = StudentForm(data={
            'student_id': 500,
            'firstName': "Emre",
            'lastName': "Tan",
            'department': "Panama",
            'mathScore': 100,
            'physicsScore': 70,
            'chemistryScore': 40,
            'biologyScore': 10
        })
        self.assertTrue(form.is_valid())
    def test_expense_form_no_data(self):
        form = StudentForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 8)
    def test_expense_form_invalid_required(self):
        form = StudentForm(data={
            'student_id': 500,
            'firstName': "",
            'lastName': "",
            'department': "",
            'mathScore': 100,
            'physicsScore': 70,
            'chemistryScore': 40,
            'biologyScore': 10
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors, {
            'firstName': ['This field is required.'],
            'lastName': ['This field is required.'],
            'department': ['This field is required.']
        })
    def test_expense_form_invalid_equal_to_max(self):
        form = StudentForm(data={
            'student_id': 120000,
            'firstName': "Berkay",
            'lastName': "Tan",
            'department': "Bilisim",
            'mathScore': 200,
            'physicsScore': 150,
            'chemistryScore': 150,
            'biologyScore': 101
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 5)
        self.assertEqual(form.errors, {
            'student_id': ['Ensure this value is less than or equal to 9999.'],
            'mathScore': ['Ensure this value is less than or equal to 100.'],
            'physicsScore': ['Ensure this value is less than or equal to 100.'],
            'chemistryScore': ['Ensure this value is less than or equal to 100.'],
            'biologyScore': ['Ensure this value is less than or equal to 100.'],
        })
|
flexible
|
{
"blob_id": "6dc7c7de972388f3984a1238a2d62e53c60c622e",
"index": 6252,
"step-1": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n <mask token>\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={'student_id': 120000, 'firstName':\n 'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',\n 'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,\n 'biologyScore': 101})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {'student_id': [\n 'Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'\n ], 'physicsScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'chemistryScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'biologyScore': [\n 'Ensure this value is less than or equal to 100.']})\n",
"step-4": "from django.test import TestCase\nfrom student.forms import StudentForm\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={'student_id': 120000, 'firstName':\n 'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',\n 'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,\n 'biologyScore': 101})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {'student_id': [\n 'Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'\n ], 'physicsScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'chemistryScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'biologyScore': [\n 'Ensure this value is less than or equal to 100.']})\n",
"step-5": "from django.test import TestCase\nfrom student.forms import StudentForm\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={\n 'student_id': 500,\n 'firstName': \"Emre\",\n 'lastName': \"Tan\",\n 'department': \"Panama\",\n 'mathScore': 100,\n 'physicsScore': 70,\n 'chemistryScore': 40,\n 'biologyScore': 10\n })\n\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={\n 'student_id': 500,\n 'firstName': \"\",\n 'lastName': \"\",\n 'department': \"\",\n 'mathScore': 100,\n 'physicsScore': 70,\n 'chemistryScore': 40,\n 'biologyScore': 10\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {\n 'firstName': ['This field is required.'],\n 'lastName': ['This field is required.'],\n 'department': ['This field is required.']\n })\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={\n 'student_id': 120000,\n 'firstName': \"Berkay\",\n 'lastName': \"Tan\",\n 'department': \"Bilisim\",\n 'mathScore': 200,\n 'physicsScore': 150,\n 'chemistryScore': 150,\n 'biologyScore': 101\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {\n 'student_id': ['Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'],\n 'physicsScore': ['Ensure this value is less than or equal to 100.'],\n 'chemistryScore': ['Ensure this value is less than or equal to 100.'],\n 'biologyScore': ['Ensure this value is less than or equal to 100.'],\n })\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'
INPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'
puSTARTUP_TTBAR = (
    '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root')
relval = {'step1': {'step': 'GEN-HLT', 'timesize': (100, ['MinBias',
    'TTbar']), 'igprof': (50, ['TTbar']), 'memcheck': (5, ['TTbar']),
    'pileup': ['TTbar'], 'cmsdriver':
    '--eventcontent RAWSIM --conditions auto:mc'}, 'step2': {'step':
    'RAW2DIGI-RECO', 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (
    200, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'],
    'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,
    INPUT_TTBAR], 'cmsdriver':
    '--eventcontent RECOSIM --conditions auto:startup'}, 'GENSIMDIGI': {
    'step': 'GEN-SIM,DIGI', 'timesize': (100, ['MinBias',
    'SingleElectronE1000', 'SingleMuMinusPt10', 'SinglePiMinusE1000',
    'TTbar']), 'igprof': (5, ['TTbar']), 'memcheck': (5, ['TTbar']),
    'pileup': ['TTbar'], 'fileInput': '', 'cmsdriver':
    '--eventcontent FEVTDEBUG --conditions auto:mc'}, 'HLT': {'step': 'HLT',
    'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (500, ['TTbar']),
    'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput':
    puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS, INPUT_TTBAR], 'cmsdriver':
    '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW'
    }, 'FASTSIM': {'step': 'GEN-FASTSIM', 'timesize': (8000, ['MinBias',
    'TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']),
    'pileup': ['TTbar'], 'cmsdriver':
    '--eventcontent RECOSIM --conditions auto:mc'}}
<|reserved_special_token_1|>
INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'
INPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'
puSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root'
relval = {
    'step1': { 'step': 'GEN-HLT',
        'timesize': (100, ['MinBias','TTbar']),
        'igprof': (50, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
#???        'pileupInput': '',
        'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc' },
    'step2': { 'step': 'RAW2DIGI-RECO',
        'timesize': (8000, ['MinBias','TTbar']),
        'igprof': (200, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        'pileupInput': puSTARTUP_TTBAR,
        'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],
        'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup' },
    'GENSIMDIGI': { 'step': 'GEN-SIM,DIGI',
        'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']),
        'igprof': (5, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
#???        'pileupInput': '',
        'fileInput': '',
        'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc' },
    'HLT': { 'step': 'HLT',
        'timesize': (8000, ['MinBias','TTbar']),
        'igprof': (500, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        'pileupInput': puSTARTUP_TTBAR,
        'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],
        'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW' },
    'FASTSIM': { 'step': 'GEN-FASTSIM',
        'timesize': (8000, ['MinBias','TTbar']),
        'igprof': (500, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        'cmsdriver': '--eventcontent RECOSIM --conditions auto:mc' }
}
|
flexible
|
{
"blob_id": "78c9f92349ba834bc64dc84f884638c4316a9ea4",
"index": 352,
"step-1": "<mask token>\n",
"step-2": "INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'\nINPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'\npuSTARTUP_TTBAR = (\n '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root')\nrelval = {'step1': {'step': 'GEN-HLT', 'timesize': (100, ['MinBias',\n 'TTbar']), 'igprof': (50, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'cmsdriver':\n '--eventcontent RAWSIM --conditions auto:mc'}, 'step2': {'step':\n 'RAW2DIGI-RECO', 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (\n 200, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'],\n 'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,\n INPUT_TTBAR], 'cmsdriver':\n '--eventcontent RECOSIM --conditions auto:startup'}, 'GENSIMDIGI': {\n 'step': 'GEN-SIM,DIGI', 'timesize': (100, ['MinBias',\n 'SingleElectronE1000', 'SingleMuMinusPt10', 'SinglePiMinusE1000',\n 'TTbar']), 'igprof': (5, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'fileInput': '', 'cmsdriver':\n '--eventcontent FEVTDEBUG --conditions auto:mc'}, 'HLT': {'step': 'HLT',\n 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (500, ['TTbar']),\n 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput':\n puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS, INPUT_TTBAR], 'cmsdriver':\n '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW'\n }, 'FASTSIM': {'step': 'GEN-FASTSIM', 'timesize': (8000, ['MinBias',\n 'TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'cmsdriver':\n '--eventcontent RECOSIM --conditions auto:mc'}}\n",
"step-3": "INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'\nINPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'\n\npuSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root'\n\nrelval = {\n 'step1': {\t'step': 'GEN-HLT',\n\t\t\t'timesize': (100, ['MinBias','TTbar']),\n\t\t\t'igprof': (50, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n#???\t\t\t'pileupInput': '',\n\t\t\t'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc' },\n\n\t'step2': {\t'step': 'RAW2DIGI-RECO',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t \t\t'igprof': (200, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'pileupInput': puSTARTUP_TTBAR,\n\t\t\t'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],\n\t\t\t'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup' },\n\n\t'GENSIMDIGI': {\t'step': 'GEN-SIM,DIGI',\n\t\t\t'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']),\n\t\t\t'igprof': (5, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n#???\t\t\t'pileupInput': '',\n\t\t\t'fileInput': '',\n\t\t\t'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc' },\n\n\t'HLT': { 'step': 'HLT',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t\t\t'igprof': (500, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'pileupInput': puSTARTUP_TTBAR,\n\t\t\t'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],\n\t\t\t'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW' },\n\n\t'FASTSIM': {\t'step': 'GEN-FASTSIM',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t\t\t'igprof': (500, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'cmsdriver': '--eventcontent RECOSIM --conditions auto:mc' }\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
<|reserved_special_token_0|>
for line in lines:
    ln = line.rstrip('\n')
    shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input('done!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
org_GIS = raw_input(
    'provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\: '
    )
outputfolder = raw_input('provide path to output folder : eg. C:\\Temp\\: ')
ext = raw_input('provide extention type to be copied eg .tif or .jpg :')
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
    ln = line.rstrip('\n')
    shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input('done!')
<|reserved_special_token_1|>
import shutil
import os
org_GIS = raw_input(
    'provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\: '
    )
outputfolder = raw_input('provide path to output folder : eg. C:\\Temp\\: ')
ext = raw_input('provide extention type to be copied eg .tif or .jpg :')
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
    ln = line.rstrip('\n')
    shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input('done!')
<|reserved_special_token_1|>
# This script allows you to copy all files with a certain extention to a new folder without integrating the sub folders
# Created by Maurice de Kleijn Vrije Universiteit Amsterdam Spatial Information laboratory for the datamanagement of the the archaological project Barin Hoyuk
# 22062016 Python 2.7
import shutil
import os
org_GIS = raw_input("provide path to GIS folder in dropbox : eg. C:\Dropbox\Barcin_Hoyuk\AIS_Barcin_Hoyuk\AIS\GIS\\: ")
outputfolder = raw_input("provide path to output folder : eg. C:\Temp\: ")
ext = raw_input("provide extention type to be copied eg .tif or .jpg :")
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
    ln = line.rstrip('\n')
    shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input("done!")
|
flexible
|
{
"blob_id": "778cf8064fa45e3e25a66f2165dcf6885c72fb8a",
"index": 634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\n<mask token>\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n",
"step-3": "<mask token>\norg_GIS = raw_input(\n 'provide path to GIS folder in dropbox : eg. C:\\\\Dropbox\\\\Barcin_Hoyuk\\\\AIS_Barcin_Hoyuk\\\\AIS\\\\GIS\\\\: '\n )\noutputfolder = raw_input('provide path to output folder : eg. C:\\\\Temp\\\\: ')\next = raw_input('provide extention type to be copied eg .tif or .jpg :')\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n",
"step-4": "import shutil\nimport os\norg_GIS = raw_input(\n 'provide path to GIS folder in dropbox : eg. C:\\\\Dropbox\\\\Barcin_Hoyuk\\\\AIS_Barcin_Hoyuk\\\\AIS\\\\GIS\\\\: '\n )\noutputfolder = raw_input('provide path to output folder : eg. C:\\\\Temp\\\\: ')\next = raw_input('provide extention type to be copied eg .tif or .jpg :')\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n",
"step-5": "# This script allows you to copy all files with a certain extention to a new folder without integrating the sub folders\n# Created by Maurice de Kleijn Vrije Universiteit Amsterdam Spatial Information laboratory for the datamanagement of the the archaological project Barin Hoyuk\n# 22062016 Python 2.7\n\nimport shutil\nimport os\n\norg_GIS = raw_input(\"provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\\\: \")\noutputfolder = raw_input(\"provide path to output folder : eg. C:\\Temp\\: \")\next = raw_input(\"provide extention type to be copied eg .tif or .jpg :\")\n\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\n\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\n\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\n\nos.system('del ' + org_GIS + 'tempext.txt')\n\nraw_input(\"done!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from setuptools import setup
from django_spaghetti import __version__
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='django-themes',
    version=__version__,
    packages=['django_themes'],
    include_package_data=True,
    license='MIT License',
    description='Admin extensions to make theming django sites easier for end users of django sites',
    long_description=README,
    url='https://github.com/LegoStormtroopr/django-themes/',
    author='Samuel Spencer',
    author_email='sam@aristotlemetadata.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
    keywords='django themes',
    install_requires=['django'], # I mean obviously you'll have django installed if you want to use this.
)
|
normal
|
{
"blob_id": "6e557c2b85031a0038afd6a9987e3417b926218f",
"index": 6184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\nsetup(name='django-themes', version=__version__, packages=['django_themes'],\n include_package_data=True, license='MIT License', description=\n 'Admin extensions to make theming django sites easier for end users of django sites'\n , long_description=README, url=\n 'https://github.com/LegoStormtroopr/django-themes/', author=\n 'Samuel Spencer', author_email='sam@aristotlemetadata.com', classifiers\n =['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'], keywords=\n 'django themes', install_requires=['django'])\n",
"step-3": "import os\nfrom setuptools import setup\nfrom django_spaghetti import __version__\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\nsetup(name='django-themes', version=__version__, packages=['django_themes'],\n include_package_data=True, license='MIT License', description=\n 'Admin extensions to make theming django sites easier for end users of django sites'\n , long_description=README, url=\n 'https://github.com/LegoStormtroopr/django-themes/', author=\n 'Samuel Spencer', author_email='sam@aristotlemetadata.com', classifiers\n =['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'], keywords=\n 'django themes', install_requires=['django'])\n",
"step-4": "import os\nfrom setuptools import setup\nfrom django_spaghetti import __version__\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django-themes',\n version=__version__,\n packages=['django_themes'],\n include_package_data=True,\n license='MIT License',\n description='Admin extensions to make theming django sites easier for end users of django sites',\n long_description=README,\n url='https://github.com/LegoStormtroopr/django-themes/',\n author='Samuel Spencer',\n author_email='sam@aristotlemetadata.com',\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n # Replace these appropriately if you are stuck on Python 2.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n keywords='django themes',\n install_requires=['django'], # I mean obviously you'll have django installed if you want to use this.\n\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import Flask
from flask import render_template
from flask import make_response
import json
from lib import powerswitch
app = Flask(__name__)
@app.route('/')
def hello_world():
    return render_template('index.html')
@app.route('/on/')
def on():
    state = powerswitch.on()
    return json.dumps(state)
@app.route('/off/')
def off():
    state = powerswitch.off()
    return json.dumps(state)
@app.route('/toggle/')
def toggle():
    state = powerswitch.toggle()
    return json.dumps(state)
@app.route('/state/')
def state():
    state = powerswitch.state()
    return json.dumps(state)
if __name__ == "__main__":
    powerswitch.on()
    app.run(host='0.0.0.0', port=80, debug=True)
|
normal
|
{
"blob_id": "18d3f58048b7e5d792eb2494ecc62bb158ac7407",
"index": 254,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-4": "from flask import Flask\nfrom flask import render_template\nfrom flask import make_response\nimport json\nfrom lib import powerswitch\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-5": "from flask import Flask\nfrom flask import render_template\nfrom flask import make_response\n\nimport json\n\nfrom lib import powerswitch\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n@app.route('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n@app.route('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n@app.route('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n@app.route('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\nif __name__ == \"__main__\":\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class Response:
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    def chain_size(self):
        server_chain_size = self.node.get_ledger_size()
        self.return_response(1, server_chain_size)
    def chain_sync(self):
        u = Utils()
        blocks = [u.dict_to_json(block) for block in self.node.
            get_server_ledger()]
        self.return_response(2, blocks)
    <|reserved_special_token_0|>
    def new_transaction(self):
        t = Transaction()
        tx = self.data['content'][0][0]
        if t.validate(tx):
            self.node.server.shared_tx.append(tx)
            self.return_response(4, tx)
        else:
            self.return_response(4)
    def return_response(self, flag, content=None):
        m = Message()
        response = m.create('response', flag, [content])
        self.node.send(response)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Response:
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    def chain_size(self):
        server_chain_size = self.node.get_ledger_size()
        self.return_response(1, server_chain_size)
    def chain_sync(self):
        u = Utils()
        blocks = [u.dict_to_json(block) for block in self.node.
            get_server_ledger()]
        self.return_response(2, blocks)
    def new_block(self):
        b = Block()
        block = self.data['content'][0]
        if not self.node.get_server_ledger():
            logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'
                .format(self.node.type, self.node.index))
            self.return_response(3, block)
        elif b.validate(block):
            self.node.server.write_message('announce', 1, block['index'])
            self.node.add_block(block)
            self.return_response(3, block)
        else:
            self.node.server.write_message('announce', 2, block['index'])
            self.return_response(3)
    def new_transaction(self):
        t = Transaction()
        tx = self.data['content'][0][0]
        if t.validate(tx):
            self.node.server.shared_tx.append(tx)
            self.return_response(4, tx)
        else:
            self.return_response(4)
    def return_response(self, flag, content=None):
        m = Message()
        response = m.create('response', flag, [content])
        self.node.send(response)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Response:
    def __init__(self, node, data):
        self.node = node
        self.data = data
        self.selector()
    def selector(self):
        if self.data['flag'] == 1:
            self.chain_size()
        elif self.data['flag'] == 2:
            self.chain_sync()
        elif self.data['flag'] == 3:
            if isinstance(self.data['content'], bool):
                self.append_new_block()
            else:
                self.new_block()
        else:
            self.new_transaction()
    def chain_size(self):
        server_chain_size = self.node.get_ledger_size()
        self.return_response(1, server_chain_size)
    def chain_sync(self):
        u = Utils()
        blocks = [u.dict_to_json(block) for block in self.node.
            get_server_ledger()]
        self.return_response(2, blocks)
    def new_block(self):
        b = Block()
        block = self.data['content'][0]
        if not self.node.get_server_ledger():
            logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'
                .format(self.node.type, self.node.index))
            self.return_response(3, block)
        elif b.validate(block):
            self.node.server.write_message('announce', 1, block['index'])
            self.node.add_block(block)
            self.return_response(3, block)
        else:
            self.node.server.write_message('announce', 2, block['index'])
            self.return_response(3)
    def new_transaction(self):
        t = Transaction()
        tx = self.data['content'][0][0]
        if t.validate(tx):
            self.node.server.shared_tx.append(tx)
            self.return_response(4, tx)
        else:
            self.return_response(4)
    def return_response(self, flag, content=None):
        m = Message()
        response = m.create('response', flag, [content])
        self.node.send(response)
<|reserved_special_token_1|>
import logging
from utils import Utils
from block import Block
from message import Message
from transaction import Transaction
class Response:
    def __init__(self, node, data):
        self.node = node
        self.data = data
        self.selector()
    def selector(self):
        if self.data['flag'] == 1:
            self.chain_size()
        elif self.data['flag'] == 2:
            self.chain_sync()
        elif self.data['flag'] == 3:
            if isinstance(self.data['content'], bool):
                self.append_new_block()
            else:
                self.new_block()
        else:
            self.new_transaction()
    def chain_size(self):
        server_chain_size = self.node.get_ledger_size()
        self.return_response(1, server_chain_size)
    def chain_sync(self):
        u = Utils()
        blocks = [u.dict_to_json(block) for block in self.node.
            get_server_ledger()]
        self.return_response(2, blocks)
    def new_block(self):
        b = Block()
        block = self.data['content'][0]
        if not self.node.get_server_ledger():
            logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'
                .format(self.node.type, self.node.index))
            self.return_response(3, block)
        elif b.validate(block):
            self.node.server.write_message('announce', 1, block['index'])
            self.node.add_block(block)
            self.return_response(3, block)
        else:
            self.node.server.write_message('announce', 2, block['index'])
            self.return_response(3)
    def new_transaction(self):
        t = Transaction()
        tx = self.data['content'][0][0]
        if t.validate(tx):
            self.node.server.shared_tx.append(tx)
            self.return_response(4, tx)
        else:
            self.return_response(4)
    def return_response(self, flag, content=None):
        m = Message()
        response = m.create('response', flag, [content])
        self.node.send(response)
<|reserved_special_token_1|>
import logging
from utils import Utils
from block import Block
from message import Message
from transaction import Transaction
class Response:
    def __init__(self, node, data):
        self.node = node
        self.data = data
        self.selector()
    def selector(self):
        if self.data['flag'] == 1:
            self.chain_size()
        elif self.data['flag'] == 2:
            self.chain_sync()
        elif self.data['flag'] == 3:
            if isinstance(self.data['content'], bool):
                self.append_new_block()
            else:
                self.new_block()
        else:
            self.new_transaction()
    def chain_size(self):
        server_chain_size = self.node.get_ledger_size()
        self.return_response(1, server_chain_size)
    def chain_sync(self):
        u = Utils()
        blocks = [u.dict_to_json(block) for block in self.node.get_server_ledger()]
        self.return_response(2, blocks)
    def new_block(self):
        b = Block()
        block = self.data['content'][0]
        if not self.node.get_server_ledger():
            # Server has no chain, cannot validate previous hash
            logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'.format(self.node.type,self.node.index))
            self.return_response(3, block)
        else:
            if b.validate(block):
                self.node.server.write_message('announce', 1, block['index'])
                self.node.add_block(block)
                self.return_response(3, block)
            else:
                self.node.server.write_message('announce', 2, block['index'])
                self.return_response(3)
    def new_transaction(self):
        t = Transaction()
        tx = self.data['content'][0][0]
        if t.validate(tx):
            self.node.server.shared_tx.append(tx)
            self.return_response(4, tx)
        else:
            self.return_response(4)
    def return_response(self, flag, content=None):
        m = Message()
        response = m.create('response', flag, [content])
        self.node.send(response)
|
flexible
|
{
"blob_id": "55b8590410bfe8f12ce3b52710238a79d27189a7",
"index": 5125,
"step-1": "<mask token>\n\n\nclass Response:\n <mask token>\n <mask token>\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n <mask token>\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-2": "<mask token>\n\n\nclass Response:\n <mask token>\n <mask token>\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'\n .format(self.node.type, self.node.index))\n self.return_response(3, block)\n elif b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-3": "<mask token>\n\n\nclass Response:\n\n def __init__(self, node, data):\n self.node = node\n self.data = data\n self.selector()\n\n def selector(self):\n if self.data['flag'] == 1:\n self.chain_size()\n elif self.data['flag'] == 2:\n self.chain_sync()\n elif self.data['flag'] == 3:\n if isinstance(self.data['content'], bool):\n self.append_new_block()\n else:\n self.new_block()\n else:\n self.new_transaction()\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'\n .format(self.node.type, self.node.index))\n self.return_response(3, block)\n elif b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-4": "import logging\nfrom utils import Utils\nfrom block import Block\nfrom message import Message\nfrom transaction import Transaction\n\n\nclass Response:\n\n def __init__(self, node, data):\n self.node = node\n self.data = data\n self.selector()\n\n def selector(self):\n if self.data['flag'] == 1:\n self.chain_size()\n elif self.data['flag'] == 2:\n self.chain_sync()\n elif self.data['flag'] == 3:\n if isinstance(self.data['content'], bool):\n self.append_new_block()\n else:\n self.new_block()\n else:\n self.new_transaction()\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.\n get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'\n .format(self.node.type, self.node.index))\n self.return_response(3, block)\n elif b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-5": "import logging\n\nfrom utils import Utils\nfrom block import Block\nfrom message import Message\nfrom transaction import Transaction\n\nclass Response:\n def __init__(self, node, data):\n self.node = node\n self.data = data\n self.selector()\n\n def selector(self):\n if self.data['flag'] == 1:\n self.chain_size()\n elif self.data['flag'] == 2:\n self.chain_sync()\n elif self.data['flag'] == 3:\n if isinstance(self.data['content'], bool):\n self.append_new_block()\n else:\n self.new_block()\n else:\n self.new_transaction()\n\n def chain_size(self):\n server_chain_size = self.node.get_ledger_size()\n self.return_response(1, server_chain_size)\n\n def chain_sync(self):\n u = Utils()\n blocks = [u.dict_to_json(block) for block in self.node.get_server_ledger()]\n self.return_response(2, blocks)\n\n def new_block(self):\n b = Block()\n block = self.data['content'][0]\n if not self.node.get_server_ledger():\n # Server has no chain, cannot validate previous hash\n logging.error('{0}Peer #{1}: cannot validate blocks! Authorizing!'.format(self.node.type,self.node.index))\n self.return_response(3, block)\n else:\n if b.validate(block):\n self.node.server.write_message('announce', 1, block['index'])\n self.node.add_block(block)\n self.return_response(3, block)\n else:\n self.node.server.write_message('announce', 2, block['index'])\n self.return_response(3)\n\n def new_transaction(self):\n t = Transaction()\n tx = self.data['content'][0][0]\n if t.validate(tx):\n self.node.server.shared_tx.append(tx)\n self.return_response(4, tx)\n else:\n self.return_response(4)\n\n def return_response(self, flag, content=None):\n m = Message()\n response = m.create('response', flag, [content])\n self.node.send(response)\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Os valores são \x1b[32m{}\x1b[m e \x1b[31m{}\x1b[m !!!'.format(a, b))
<|reserved_special_token_0|>
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[
'amarelo']))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = 3
b = 5
print('Os valores são \x1b[32m{}\x1b[m e \x1b[31m{}\x1b[m !!!'.format(a, b))
nome = 'Kátia'
cores = {'limpa': '\x1b]m', 'azul': '\x1b[34m', 'amarelo': '\x1b[33m',
'pretoebranco': '\x1b[7;30m'}
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[
'amarelo']))
<|reserved_special_token_1|>
"""
Colors in the terminal
"""
a = 3
b = 5
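# \033[32m and \033[31m are ANSI escape codes for green and red text; \033[m resets the color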
print('Os valores são \033[32m{}\033[m e \033[31m{}\033[m !!!'.format(a, b))
# Dictionary of colors:
nome = 'Kátia'
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'amarelo':'\033[33m',
'pretoebranco':'\033[7;30m'}
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo']))
# you can also place the color codes directly inside the braces of the format string.
|
flexible
|
{
"blob_id": "7bbbd30ba1578c1165ccf5c2fff22609c16dfd64",
"index": 393,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Os valores são \\x1b[32m{}\\x1b[m e \\x1b[31m{}\\x1b[m !!!'.format(a, b))\n<mask token>\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[\n 'amarelo']))\n",
"step-3": "<mask token>\na = 3\nb = 5\nprint('Os valores são \\x1b[32m{}\\x1b[m e \\x1b[31m{}\\x1b[m !!!'.format(a, b))\nnome = 'Kátia'\ncores = {'limpa': '\\x1b]m', 'azul': '\\x1b[34m', 'amarelo': '\\x1b[33m',\n 'pretoebranco': '\\x1b[7;30m'}\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[\n 'amarelo']))\n",
"step-4": "\"\"\"\r\nCores no terminal\r\n\"\"\"\r\n\r\na = 3\r\nb = 5\r\nprint('Os valores são \\033[32m{}\\033[m e \\033[31m{}\\033[m !!!'.format(a, b))\r\n\r\n# Dicionário de cores:\r\nnome = 'Kátia'\r\ncores = {'limpa':'\\033]m',\r\n 'azul':'\\033[34m',\r\n 'amarelo':'\\033[33m',\r\n 'pretoebranco':'\\033[7;30m'}\r\n\r\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo']))\r\n# dá pra colocar as cores dentro das chaves tb.\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# accessing array elements rows/columns
import numpy as np
a = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])
print(a.shape) # array shape
print(a)
print('\n')
# specific array element [r,c]
# item 6
print(a[0][5])
# item 8
print(a[1][1]) # or
print(a[1][-6])
# get a specific row/specific column
print(a[1])
print(a[0])
print(a[0, :])
print(a[:, 1]) # prints second column
print('\n')
# get only the even numbers from first row [start_index:end_index:step]
print('even numbers from first row')
print(a[0, 1:8:2])
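# note: this slice selects every second element by position; a value-based filter would be a[0][a[0] % 2 == 0]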
# change certain value of array
a[1, 2] = 90
print('new array is ',a)
|
normal
|
{
"blob_id": "8cc97ebe0ff7617eaf31919d40fa6c312d7b6f94",
"index": 8814,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\n<mask token>\nprint('new array is ', a)\n",
"step-3": "<mask token>\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\na[1, 2] = 90\nprint('new array is ', a)\n",
"step-4": "import numpy as np\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\na[1, 2] = 90\nprint('new array is ', a)\n",
"step-5": "# accessing array elements rows/columns\nimport numpy as np\n\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape) # array shape\nprint(a)\nprint('\\n')\n\n# specific array element [r,c]\n# item 6\nprint(a[0][5])\n\n# item 8\nprint(a[1][1]) # or\nprint(a[1][-6])\n\n# get a specific row/specific column\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1]) # prints second column\nprint('\\n')\n\n# get only the even numbers from first row [start_index:end_index:step]\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\n\n# change certain value of array\na[1, 2] = 90\nprint('new array is ',a)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import time
from wxpy import *
bot = Bot(cache_path='wxpy.pkl')
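# cache_path keeps the logged-in WeChat session on disk, so the QR-code scan is not repeated on every run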
def get(i):
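    # '晚安.txt' ("good night.txt") holds one message per line; return line i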
with open('晚安.txt', 'r', encoding='utf-8') as f:
line = f.readlines()[i]
return line
def send(i):
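    # look up the friend by nickname ('微信好友昵称' is a placeholder meaning "WeChat friend nickname") and send the i-th message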
myfriend = bot.friends().search('微信好友昵称')[0]
myfriend.send(get(i))
i += 1
def main():
for i in range(3650):
send(i)
time.sleep(5)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a7d11f130e0d5d6c9b4ac7c5d3a804fb9f79b943",
"index": 2284,
"step-1": "<mask token>\n\n\ndef get(i):\n with open('晚安.txt', 'r', encoding='utf-8') as f:\n line = f.readlines()[i]\n return line\n\n\n<mask token>\n\n\ndef main():\n for i in range(3650):\n send(i)\n time.sleep(5)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get(i):\n with open('晚安.txt', 'r', encoding='utf-8') as f:\n line = f.readlines()[i]\n return line\n\n\ndef send(i):\n myfriend = bot.friends().search('微信好友昵称')[0]\n myfriend.send(get(i))\n i += 1\n\n\ndef main():\n for i in range(3650):\n send(i)\n time.sleep(5)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nbot = Bot(cache_path='wxpy.pkl')\n\n\ndef get(i):\n with open('晚安.txt', 'r', encoding='utf-8') as f:\n line = f.readlines()[i]\n return line\n\n\ndef send(i):\n myfriend = bot.friends().search('微信好友昵称')[0]\n myfriend.send(get(i))\n i += 1\n\n\ndef main():\n for i in range(3650):\n send(i)\n time.sleep(5)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import time\nfrom wxpy import *\nbot = Bot(cache_path='wxpy.pkl')\n\n\ndef get(i):\n with open('晚安.txt', 'r', encoding='utf-8') as f:\n line = f.readlines()[i]\n return line\n\n\ndef send(i):\n myfriend = bot.friends().search('微信好友昵称')[0]\n myfriend.send(get(i))\n i += 1\n\n\ndef main():\n for i in range(3650):\n send(i)\n time.sleep(5)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
#!/usr/bin/env python
import serial
from action import Action
import math
comm = serial.Serial("/dev/ttyACM3", 115200, timeout=1)
#comm = None
robot = Action(comm)
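# Each HTTP route below maps one control-pad button to a robot action.
# Action.move(angle, scale) appears to take a heading in radians (0 = forward, pi/2 = left)
# and a drive scale of +1 (forward) or -1 (reverse); Action.turn takes a signed turning speed.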
from flask import Flask
from flask import send_from_directory
import os
static_dir = os.path.join(os.getcwd(), "ControlApp")
print "serving from " + static_dir
app = Flask(__name__)
app.debug = False
@app.route('/')
def root():
return send_from_directory(static_dir, "control.html")
@app.route("/stop")
def do_stop():
robot.stop()
return "ok"
@app.route("/forward")
def do_forward():
robot.move(0, 1)
return "ok"
@app.route("/backward")
def do_backward():
robot.move(0, -1)
return "ok"
@app.route("/left")
def do_left():
robot.move(math.pi/2.0, 1)
return "ok"
@app.route("/right")
def do_right():
robot.move(math.pi*3.0/2.0, 1)
return "ok"
@app.route("/turncw")
def do_turncw():
robot.turn(0.5)
return "ok"
@app.route("/turnacw")
def do_turnacw():
robot.turn(-0.5)
return "ok"
@app.route("/kick")
def do_kick():
robot.kick()
return "ok"
@app.route("/catch")
def do_catch():
robot.catch()
return "ok"
if __name__ == "__main__":
app.debug = True
app.run(port=5001)
|
normal
|
{
"blob_id": "54a6405e3447d488aa4fca88159ccaac2506df2c",
"index": 5995,
"step-1": "#!/usr/bin/env python\n\nimport serial\nfrom action import Action\nimport math\n\ncomm = serial.Serial(\"/dev/ttyACM3\", 115200, timeout=1)\n#comm = None\nrobot = Action(comm)\n\nfrom flask import Flask\nfrom flask import send_from_directory\nimport os\n\nstatic_dir = os.path.join(os.getcwd(), \"ControlApp\")\nprint \"serving from \" + static_dir\n\napp = Flask(__name__)\napp.debug = False\n\n\n@app.route('/')\ndef root():\n return send_from_directory(static_dir, \"control.html\")\n\n@app.route(\"/stop\")\ndef do_stop():\n robot.stop()\n return \"ok\"\n \n@app.route(\"/forward\")\ndef do_forward():\n robot.move(0, 1)\n return \"ok\"\n \n@app.route(\"/backward\")\ndef do_backward():\n robot.move(0, -1)\n return \"ok\"\n\n@app.route(\"/left\")\ndef do_left():\n robot.move(math.pi/2.0, 1)\n return \"ok\"\n\n@app.route(\"/right\")\ndef do_right():\n robot.move(math.pi*3.0/2.0, 1)\n return \"ok\"\n\n@app.route(\"/turncw\")\ndef do_turncw():\n robot.turn(0.5)\n return \"ok\"\n\n@app.route(\"/turnacw\")\ndef do_turnacw():\n robot.turn(-0.5)\n return \"ok\"\n\n@app.route(\"/kick\")\ndef do_kick():\n robot.kick()\n return \"ok\"\n\n@app.route(\"/catch\")\ndef do_catch():\n robot.catch()\n return \"ok\" \n \nif __name__ == \"__main__\":\n app.debug = True\n app.run(port=5001)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line_evaluation(param_list, param_eval, file_name='line evaluation', **
kwargs):
"""
Evaluates a list of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list : array_like
List of values to test for parameter of interest.
param_eval : callable
Must take a parameter instance and return an object that can be evaluated
by `aggr_meth` (see :func:`grid_evaluation`).
file_name : string, optional
Optional name for the file. (default is 'line evaluation')
Returns
-------
dict
A dictionary with the results of the experiment.
Notes
-----
You can also explicitely set the arguments in :func:`grid_evaluation` in this function
call.
"""
experiment = grid_evaluation(param_list_one=param_list, param_list_two=
[], param_eval=param_eval, file_name=file_name, **kwargs)
experiment['line'] = experiment.pop('grid')
experiment['cols'] = experiment.pop('rows')
return experiment
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,
aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',
save_to_disk=True, save_each=1000, chunksize=1.0):
"""
Evaluates a grid of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list_one : array_like
List of values to test for the first parameter.
param_list_two : array_like, optional
List of values to test for the second parameter. Can be empty, in which case a
one-dimensional grid is evaluated.
param_eval : callable
Must take an instance of parameter values and return an object that can be evaluated
by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs
otherwise.
n_trials : int, optional
Number of trials to run for each parameter pair. (default is `16`)
aggr_method : callable, optional
The aggregation method for the values returned by `patam_eval` on different
trials for the same parameter pair. (default is :func:`numpy.mean`)
save_dir : string, optional
Directory onto which save the result. (default is 'data/')
file_name : string, optional
Optional name for the file. It is always prepended with the time stamp at the
end of the grid evaluation. (default is 'grid evaluation')
save_to_disk : bool, optional
Whether to save the experiment to disk (True) or not (False). (default is `True`)
save_each : int, optional
Save the experiment each time `save_each` grid points are computed. (default is `1000`)
chunksize : int
The size of the chunks of jobs sent to each parallel worker. (default is `1`)
Returns
-------
dict
A dictionary with the results of the experiment.
"""
if not list(param_list_two):
params = param_list_one
grid_shape = len(param_list_one),
is_really_grid = False
else:
params = list(itertools.product(param_list_one, param_list_two))
grid_shape = len(param_list_one), len(param_list_two)
is_really_grid = True
def grid_fun(point):
trial_out = np.nan * np.ones((n_trials,))
for i in np.arange(n_trials):
if is_really_grid:
trial_out[i] = param_eval(point[0], point[1])
else:
trial_out[i] = param_eval(point)
return aggr_method(trial_out)
n_grid_pts = len(params)
def record_experiment(grid):
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
save_path = save_dir + now + ' ' + file_name + '.pkl'
experiment = {'date': now, 'rows': param_list_one, 'cols':
param_list_two, 'n_trials': n_trials, 'grid': np.reshape(grid,
grid_shape), 'path': save_path}
if save_to_disk:
utils.save_obj(experiment, save_path)
return experiment
nb_workers = min(mp.cpu_count(), 24)
print('Working with {} processes.'.format(nb_workers))
pool = mp.Pool(nb_workers)
it = pool.imap(grid_fun, params, chunksize=chunksize)
grid = np.nan * np.ones((n_grid_pts,))
for idx, val in enumerate(tqdm(it, total=n_grid_pts)):
grid[idx] = val
if idx >= save_each and idx % save_each == 0:
experiment = record_experiment(grid)
pool.close()
pool.join()
experiment = record_experiment(grid)
return experiment
def line_evaluation(param_list, param_eval, file_name='line evaluation', **
kwargs):
"""
Evaluates a list of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list : array_like
List of values to test for parameter of interest.
param_eval : callable
Must take a parameter instance and return an object that can be evaluated
by `aggr_meth` (see :func:`grid_evaluation`).
file_name : string, optional
Optional name for the file. (default is 'line evaluation')
Returns
-------
dict
A dictionary with the results of the experiment.
Notes
-----
You can also explicitely set the arguments in :func:`grid_evaluation` in this function
call.
"""
experiment = grid_evaluation(param_list_one=param_list, param_list_two=
[], param_eval=param_eval, file_name=file_name, **kwargs)
experiment['line'] = experiment.pop('grid')
experiment['cols'] = experiment.pop('rows')
return experiment
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import utils
import datetime
import itertools
import numpy as np
import recovery as rec
import sampling as smp
import graphs_signals as gs
import pathos.multiprocessing as mp
from tqdm import tqdm
def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,
aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',
save_to_disk=True, save_each=1000, chunksize=1.0):
"""
Evaluates a grid of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list_one : array_like
List of values to test for the first parameter.
param_list_two : array_like, optional
List of values to test for the second parameter. Can be empty, in which case a
one-dimensional grid is evaluated.
param_eval : callable
Must take an instance of parameter values and return an object that can be evaluated
by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs
otherwise.
n_trials : int, optional
Number of trials to run for each parameter pair. (default is `16`)
aggr_method : callable, optional
The aggregation method for the values returned by `patam_eval` on different
trials for the same parameter pair. (default is :func:`numpy.mean`)
save_dir : string, optional
Directory onto which save the result. (default is 'data/')
file_name : string, optional
Optional name for the file. It is always prepended with the time stamp at the
end of the grid evaluation. (default is 'grid evaluation')
save_to_disk : bool, optional
Whether to save the experiment to disk (True) or not (False). (default is `True`)
save_each : int, optional
Save the experiment each time `save_each` grid points are computed. (default is `1000`)
chunksize : int
The size of the chunks of jobs sent to each parallel worker. (default is `1`)
Returns
-------
dict
A dictionary with the results of the experiment.
"""
if not list(param_list_two):
params = param_list_one
grid_shape = len(param_list_one),
is_really_grid = False
else:
params = list(itertools.product(param_list_one, param_list_two))
grid_shape = len(param_list_one), len(param_list_two)
is_really_grid = True
def grid_fun(point):
trial_out = np.nan * np.ones((n_trials,))
for i in np.arange(n_trials):
if is_really_grid:
trial_out[i] = param_eval(point[0], point[1])
else:
trial_out[i] = param_eval(point)
return aggr_method(trial_out)
n_grid_pts = len(params)
def record_experiment(grid):
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
save_path = save_dir + now + ' ' + file_name + '.pkl'
experiment = {'date': now, 'rows': param_list_one, 'cols':
param_list_two, 'n_trials': n_trials, 'grid': np.reshape(grid,
grid_shape), 'path': save_path}
if save_to_disk:
utils.save_obj(experiment, save_path)
return experiment
nb_workers = min(mp.cpu_count(), 24)
print('Working with {} processes.'.format(nb_workers))
pool = mp.Pool(nb_workers)
it = pool.imap(grid_fun, params, chunksize=chunksize)
grid = np.nan * np.ones((n_grid_pts,))
for idx, val in enumerate(tqdm(it, total=n_grid_pts)):
grid[idx] = val
if idx >= save_each and idx % save_each == 0:
experiment = record_experiment(grid)
pool.close()
pool.join()
experiment = record_experiment(grid)
return experiment
def line_evaluation(param_list, param_eval, file_name='line evaluation', **
kwargs):
"""
Evaluates a list of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list : array_like
List of values to test for parameter of interest.
param_eval : callable
Must take a parameter instance and return an object that can be evaluated
by `aggr_meth` (see :func:`grid_evaluation`).
file_name : string, optional
Optional name for the file. (default is 'line evaluation')
Returns
-------
dict
A dictionary with the results of the experiment.
Notes
-----
You can also explicitely set the arguments in :func:`grid_evaluation` in this function
call.
"""
experiment = grid_evaluation(param_list_one=param_list, param_list_two=
[], param_eval=param_eval, file_name=file_name, **kwargs)
experiment['line'] = experiment.pop('grid')
experiment['cols'] = experiment.pop('rows')
return experiment
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Phase transition module
"""
import utils
import datetime
import itertools
import numpy as np
import recovery as rec
import sampling as smp
import graphs_signals as gs
import pathos.multiprocessing as mp
from tqdm import tqdm
## MAIN FUNCTIONS ##
def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,
aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',
save_to_disk=True, save_each=1000, chunksize=1.):
r"""
Evaluates a grid of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list_one : array_like
List of values to test for the first parameter.
param_list_two : array_like, optional
List of values to test for the second parameter. Can be empty, in which case a
one-dimensional grid is evaluated.
param_eval : callable
Must take an instance of parameter values and return an object that can be evaluated
by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs
otherwise.
n_trials : int, optional
Number of trials to run for each parameter pair. (default is `16`)
aggr_method : callable, optional
        The aggregation method for the values returned by `param_eval` on different 
trials for the same parameter pair. (default is :func:`numpy.mean`)
save_dir : string, optional
Directory onto which save the result. (default is 'data/')
file_name : string, optional
Optional name for the file. It is always prepended with the time stamp at the
end of the grid evaluation. (default is 'grid evaluation')
save_to_disk : bool, optional
Whether to save the experiment to disk (True) or not (False). (default is `True`)
save_each : int, optional
Save the experiment each time `save_each` grid points are computed. (default is `1000`)
chunksize : int
The size of the chunks of jobs sent to each parallel worker. (default is `1`)
Returns
-------
dict
A dictionary with the results of the experiment.
"""
if not list(param_list_two): # If `param_list_two` is empty
params = param_list_one
grid_shape = (len(param_list_one),)
is_really_grid = False
else:
params = list(itertools.product(param_list_one, param_list_two))
grid_shape = (len(param_list_one), len(param_list_two))
is_really_grid = True
def grid_fun(point): # Function to compute for each grid point
trial_out = np.nan * np.ones((n_trials,))
for i in np.arange(n_trials):
if is_really_grid:
trial_out[i] = param_eval(point[0], point[1])
else: # If `param_list_two` is empty
trial_out[i] = param_eval(point)
return aggr_method(trial_out)
n_grid_pts = len(params)
# Recording procedure
def record_experiment(grid):
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
save_path = save_dir + now + ' ' + file_name + '.pkl'
experiment = {
'date': now,
'rows': param_list_one,
'cols': param_list_two,
'n_trials': n_trials,
'grid': np.reshape(grid, grid_shape),
'path': save_path
}
if save_to_disk:
utils.save_obj(experiment, save_path)
return experiment
# Set a pool of workers
nb_workers = min(mp.cpu_count(), 24)
print('Working with {} processes.'.format(nb_workers))
pool = mp.Pool(nb_workers)
# Iterate `grid_fun` across workers
it = pool.imap(grid_fun, params, chunksize=chunksize)
grid = np.nan * np.ones((n_grid_pts,))
for idx, val in enumerate(tqdm(it, total=n_grid_pts)):
grid[idx] = val
# Make sure that we save after each couple of iterations
if (idx >= save_each) and (idx % save_each == 0):
experiment = record_experiment(grid)
# Close pool
pool.close()
pool.join()
experiment = record_experiment(grid)
return experiment
def line_evaluation(param_list, param_eval, file_name='line evaluation', **kwargs):
r"""
Evaluates a list of parameter pairs across repeated trials and aggregates the result.
Parameters
----------
param_list : array_like
List of values to test for parameter of interest.
param_eval : callable
Must take a parameter instance and return an object that can be evaluated
by `aggr_meth` (see :func:`grid_evaluation`).
file_name : string, optional
Optional name for the file. (default is 'line evaluation')
Returns
-------
dict
A dictionary with the results of the experiment.
Notes
-----
    You can also explicitly set the arguments in :func:`grid_evaluation` in this function 
call.
"""
experiment = grid_evaluation(param_list_one=param_list,
param_list_two=[],
param_eval=param_eval,
file_name=file_name,
**kwargs)
experiment['line'] = experiment.pop('grid')
experiment['cols'] = experiment.pop('rows')
return experiment
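# Minimal usage sketch (the names below are illustrative, not part of the module):
#   p_one = np.linspace(0.1, 1.0, 10)
#   p_two = np.arange(1, 6)
#   eval_fun = lambda a, b: a * b              # stand-in for a real recovery experiment
#   exp = grid_evaluation(p_one, p_two, eval_fun, n_trials=4, save_to_disk=False)
#   exp['grid'].shape                          # -> (10, 5)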
|
flexible
|
{
"blob_id": "d65f858c3ad06226b83d2627f6d38e03eae5b36c",
"index": 266,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **\n kwargs):\n \"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n experiment = grid_evaluation(param_list_one=param_list, param_list_two=\n [], param_eval=param_eval, file_name=file_name, **kwargs)\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n return experiment\n",
"step-3": "<mask token>\n\n\ndef grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,\n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.0):\n \"\"\"\n Evaluates a grid of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list_one : array_like\n List of values to test for the first parameter.\n param_list_two : array_like, optional\n List of values to test for the second parameter. Can be empty, in which case a \n one-dimensional grid is evaluated.\n param_eval : callable\n Must take an instance of parameter values and return an object that can be evaluated \n by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs \n otherwise.\n n_trials : int, optional\n Number of trials to run for each parameter pair. (default is `16`)\n aggr_method : callable, optional\n The aggregation method for the values returned by `patam_eval` on different \n trials for the same parameter pair. (default is :func:`numpy.mean`)\n save_dir : string, optional\n Directory onto which save the result. (default is 'data/')\n file_name : string, optional\n Optional name for the file. It is always prepended with the time stamp at the \n end of the grid evaluation. (default is 'grid evaluation')\n save_to_disk : bool, optional\n Whether to save the experiment to disk (True) or not (False). (default is `True`)\n save_each : int, optional\n Save the experiment each time `save_each` grid points are computed. (default is `1000`)\n chunksize : int\n The size of the chunks of jobs sent to each parallel worker. (default is `1`)\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n\n \"\"\"\n if not list(param_list_two):\n params = param_list_one\n grid_shape = len(param_list_one),\n is_really_grid = False\n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = len(param_list_one), len(param_list_two)\n is_really_grid = True\n\n def grid_fun(point):\n trial_out = np.nan * np.ones((n_trials,))\n for i in np.arange(n_trials):\n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else:\n trial_out[i] = param_eval(point)\n return aggr_method(trial_out)\n n_grid_pts = len(params)\n\n def record_experiment(grid):\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {'date': now, 'rows': param_list_one, 'cols':\n param_list_two, 'n_trials': n_trials, 'grid': np.reshape(grid,\n grid_shape), 'path': save_path}\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n if idx >= save_each and idx % save_each == 0:\n experiment = record_experiment(grid)\n pool.close()\n pool.join()\n experiment = record_experiment(grid)\n return experiment\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **\n kwargs):\n \"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an 
object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n experiment = grid_evaluation(param_list_one=param_list, param_list_two=\n [], param_eval=param_eval, file_name=file_name, **kwargs)\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n return experiment\n",
"step-4": "<mask token>\nimport utils\nimport datetime\nimport itertools\nimport numpy as np\nimport recovery as rec\nimport sampling as smp\nimport graphs_signals as gs\nimport pathos.multiprocessing as mp\nfrom tqdm import tqdm\n\n\ndef grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16,\n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.0):\n \"\"\"\n Evaluates a grid of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list_one : array_like\n List of values to test for the first parameter.\n param_list_two : array_like, optional\n List of values to test for the second parameter. Can be empty, in which case a \n one-dimensional grid is evaluated.\n param_eval : callable\n Must take an instance of parameter values and return an object that can be evaluated \n by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs \n otherwise.\n n_trials : int, optional\n Number of trials to run for each parameter pair. (default is `16`)\n aggr_method : callable, optional\n The aggregation method for the values returned by `patam_eval` on different \n trials for the same parameter pair. (default is :func:`numpy.mean`)\n save_dir : string, optional\n Directory onto which save the result. (default is 'data/')\n file_name : string, optional\n Optional name for the file. It is always prepended with the time stamp at the \n end of the grid evaluation. (default is 'grid evaluation')\n save_to_disk : bool, optional\n Whether to save the experiment to disk (True) or not (False). (default is `True`)\n save_each : int, optional\n Save the experiment each time `save_each` grid points are computed. (default is `1000`)\n chunksize : int\n The size of the chunks of jobs sent to each parallel worker. 
(default is `1`)\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n\n \"\"\"\n if not list(param_list_two):\n params = param_list_one\n grid_shape = len(param_list_one),\n is_really_grid = False\n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = len(param_list_one), len(param_list_two)\n is_really_grid = True\n\n def grid_fun(point):\n trial_out = np.nan * np.ones((n_trials,))\n for i in np.arange(n_trials):\n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else:\n trial_out[i] = param_eval(point)\n return aggr_method(trial_out)\n n_grid_pts = len(params)\n\n def record_experiment(grid):\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {'date': now, 'rows': param_list_one, 'cols':\n param_list_two, 'n_trials': n_trials, 'grid': np.reshape(grid,\n grid_shape), 'path': save_path}\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n if idx >= save_each and idx % save_each == 0:\n experiment = record_experiment(grid)\n pool.close()\n pool.join()\n experiment = record_experiment(grid)\n return experiment\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **\n kwargs):\n \"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n experiment = grid_evaluation(param_list_one=param_list, param_list_two=\n [], param_eval=param_eval, file_name=file_name, **kwargs)\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n return experiment\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Phase transition module\n\n\"\"\"\n\n\nimport utils\nimport datetime\nimport itertools\n\nimport numpy as np\nimport recovery as rec\nimport sampling as smp\nimport graphs_signals as gs\nimport pathos.multiprocessing as mp\n\nfrom tqdm import tqdm\n\n\n## MAIN FUNCTIONS ##\n\ndef grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n r\"\"\"\n Evaluates a grid of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list_one : array_like\n List of values to test for the first parameter.\n param_list_two : array_like, optional\n List of values to test for the second parameter. Can be empty, in which case a \n one-dimensional grid is evaluated.\n param_eval : callable\n Must take an instance of parameter values and return an object that can be evaluated \n by `aggr_meth`. It should accept one input if `param_list_two` is empty, and two inputs \n otherwise.\n n_trials : int, optional\n Number of trials to run for each parameter pair. (default is `16`)\n aggr_method : callable, optional\n The aggregation method for the values returned by `patam_eval` on different \n trials for the same parameter pair. (default is :func:`numpy.mean`)\n save_dir : string, optional\n Directory onto which save the result. (default is 'data/')\n file_name : string, optional\n Optional name for the file. It is always prepended with the time stamp at the \n end of the grid evaluation. (default is 'grid evaluation')\n save_to_disk : bool, optional\n Whether to save the experiment to disk (True) or not (False). (default is `True`)\n save_each : int, optional\n Save the experiment each time `save_each` grid points are computed. (default is `1000`)\n chunksize : int\n The size of the chunks of jobs sent to each parallel worker. 
(default is `1`)\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n\n \"\"\"\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment\n\n\ndef line_evaluation(param_list, param_eval, file_name='line evaluation', **kwargs):\n r\"\"\"\n Evaluates a list of parameter pairs across repeated trials and aggregates the result.\n\n Parameters\n ----------\n param_list : array_like\n List of values to test for parameter of interest.\n param_eval : callable\n Must take a parameter instance and return an object that can be evaluated \n by `aggr_meth` (see :func:`grid_evaluation`).\n file_name : string, optional\n Optional name for the file. (default is 'line evaluation')\n \n Returns\n -------\n dict\n A dictionary with the results of the experiment.\n \n Notes\n -----\n You can also explicitely set the arguments in :func:`grid_evaluation` in this function \n call.\n\n \"\"\"\n \n experiment = grid_evaluation(param_list_one=param_list,\n param_list_two=[],\n param_eval=param_eval,\n file_name=file_name,\n **kwargs)\n\n experiment['line'] = experiment.pop('grid')\n experiment['cols'] = experiment.pop('rows')\n \n return experiment\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# encoding: utf-8
'''
    A 1D CNN and a 2D CNN extract features, an LSTM then processes each branch, and the
    extracted features are finally concatenated; the CNN and LSTM stages are interleaved.
'''
# Import the required packages
import keras
# Import the layer types used by the model
from keras.models import Sequential
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout,LSTM,Reshape
from keras import Model
# For visualizing the network architecture
from keras.utils import plot_model
def merge_model(model_1, model_2):
'''
    Merge two independent Keras models into one.
    :param model_1:
    :param model_2:
    :return:
    '''
    # model_1.load_weights('model_1_weight.h5')  # per-branch weights could be loaded here
    # model_2.load_weights('model_2_weight.h5')  # e.g. pre-trained weights (transfer learning)
    inp1 = model_1.input  # input tensor of the first model
    inp2 = model_2.input  # input tensor of the second model
r1 = model_1.output
r2 = model_2.output
x = keras.layers.Concatenate(axis=1)([r1, r2])
model = Model(inputs=[inp1, inp2], outputs=x)
return model
def addLayers_model(model):
'''
    Modify the model (append extra layers).
    After fusing features with keras Concatenate, Sequential-style add() can no longer be used,
    so the extra layers are attached through the functional API instead.
    :param model: the model to be extended
    :return:
    '''
    origin_model = model
    for layer in origin_model.layers:
        layer.trainable = False  # freeze the original layers so they are not retrained
inp = origin_model.input
x = origin_model.output
den = Dense(512, name="fine_dense")(x)
l = Dropout(0.5)(den)
result = Dense(10, activation="softmax")(l)
    model = Model(inputs=inp, outputs=result)  # 'inputs' is the keyword expected by the functional API
return model
input_shape_1D = (1024, 1)
input_shape_2D = (32, 32, 1)
# Build the model
# Network structure (conv layer: relu - pooling - conv layer - pooling - Flatten - merge layer - dense - Dropout - softmax)
# ==================== 1. 1D branch ==============================
model1 = Sequential()
# Conv1D: 8 @ 1*1024. 8 filters (convolution kernels), kernel size 3
model1.add(Conv1D(filters=8,
kernel_size=(3),
input_shape=input_shape_1D,
padding='same',
activation='relu'))
# MaxPooling1D: 8 @ 1*512.
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
# Conv1D: 16 @ 1*512. 16 filters, kernel size 3
model1.add(Conv1D(filters=16,
kernel_size=(3),
input_shape=(1, 512),
padding='same',
activation='relu'))
# MaxPooling1D: 16 @ 1*256.
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
'''
# Conv1D: 16 @ 1*256. 16 filters, kernel size 3
model1.add(Conv1D(filters=16,
kernel_size=(3),
input_shape=(1, 512),
padding='same',
activation='relu'))
# MaxPooling1D: 16 @ 1*128.
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
'''
model1.add(LSTM(32,return_sequences=True))
model1.add(Flatten())  # flatten the branch output to one dimension
# =============================================================
# ==================== 2. 2D branch ============================
model2 = Sequential()
# Conv2D: 8 @ 32*32. 8 filters (convolution kernels), kernel size 3*3
model2.add(Conv2D(filters=8,
kernel_size=(3, 3),
input_shape=input_shape_2D,
padding='same',
activation='relu'))
# MaxPooling2D: 8 @ 16*16.
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
# Conv2D: 16 @ 16*16. 16 filters, kernel size 3*3
model2.add(Conv2D(filters=16,
kernel_size=(3, 3),
input_shape=(16, 16, 1),
padding='same',
activation='relu'))
# MaxPooling2D: 16 @ 8*8.
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
# Conv2D: 16 @ 8*8. 16 filters, kernel size 3*3
model2.add(Conv2D(filters=16,
kernel_size=(3, 3),
input_shape=(8, 8, 1),
padding='same',
activation='relu'))
# MaxPooling2D: 16 @ 4*4.
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
print("model2两层卷积后的输出形状:",model2.output_shape) # (None,4,4,16)
model2.add(Reshape((64,16))) #(None,16,16)
model2.add(LSTM(32,return_sequences=True))
model2.add(Flatten())
# =============================================================
# ==================== 3. Merge layer ===============================
# Fusion of the two branches
model = merge_model(model1, model2)
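# Note: the merged model takes two inputs per sample, the 1*1024 signal for model1 and its 32*32 image form for model2.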
model.summary()
# =============================================================
print("model.outputs:",model.output.shape)
# ============= 4. Dense layer, dropout, classification layer ====================
model = addLayers_model(model)
print(model.summary())
plot_model(model, to_file='model/1D2DLSTM_cross.png')
# =============================================================
# ==================== 5. Training settings ==========================
# Adam optimizer; an initial learning rate of 0.1 with ReduceLROnPlateau decay
# can be configured through the callbacks of model.fit
# adam = keras.optimizers.Adam(lr=0.1)
adam = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
# =============================================================
# Save the model structure
model.save('model/1D2DLSTM_cross.h5')
|
normal
|
{
"blob_id": "cce1b6f8e4b3f78adfa2243fe49b4994d35c5a38",
"index": 9898,
"step-1": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\n<mask token>\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\n<mask token>\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\n<mask token>\nmodel.summary()\nprint('model.outputs:', model.output.shape)\n<mask token>\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\n<mask token>\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-3": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = 1024, 1\ninput_shape_2D = 32, 32, 1\nmodel1 = Sequential()\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\nmodel = merge_model(model1, model2)\nmodel.summary()\nprint('model.outputs:', model.output.shape)\nmodel = addLayers_model(model)\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-4": "<mask token>\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout, LSTM, Reshape\nfrom keras import Model\nfrom keras.utils import plot_model\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = 1024, 1\ninput_shape_2D = 32, 32, 1\nmodel1 = Sequential()\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\nmodel = merge_model(model1, model2)\nmodel.summary()\nprint('model.outputs:', model.output.shape)\nmodel = addLayers_model(model)\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n'''\n 1D2DCNN抽取特征,LSTM后提取特征,最后将提取的特征进行拼接,CNN与LSTM是交叉在一起的\n'''\n\n# 导入相关的包\nimport keras\n\n# 导入相关层的结构\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout,LSTM,Reshape\nfrom keras import Model\n\n# 可视化神经网络\nfrom keras.utils import plot_model\n\n\ndef merge_model(model_1, model_2):\n '''\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n '''\n\n # model_1.load_weights('model_1_weight.h5')#这里可以加载各自权重\n # model_2.load_weights('model_2_weight.h5')#可以是预训练好的模型权重(迁移学习)\n\n inp1 = model_1.input # 第一个模型的参数\n inp2 = model_2.input # 第二个模型的参数\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n '''\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n '''\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False # 原来的不训练,冻结网络层\n\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name=\"fine_dense\")(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation=\"softmax\")(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = (1024, 1)\ninput_shape_2D = (32, 32, 1)\n\n# 构建模型\n# 网络结构(卷积层:relu - 池化层 - 卷积层 - 池化层 - Flatten - 汇聚层 - 全连接层 - Dropout - softmax)\n# ====================1、 1D部分 ==============================\nmodel1 = Sequential()\n# Conv1D:8 @ 1*1024。8个过滤器(卷积核),卷积核大小设置为3\nmodel1.add(Conv1D(filters=8,\n kernel_size=(3),\n input_shape=input_shape_1D,\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:8 @ 1*512。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n\n# Conv1D:16 @ 1*512。16个过滤器,大小设置为3\nmodel1.add(Conv1D(filters=16,\n kernel_size=(3),\n input_shape=(1, 512),\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:16 @ 1*256。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n'''\n# Conv1D: 16 @ 1*256 。16个过滤器,大小设置为3\nmodel1.add(Conv1D(filters=16,\n kernel_size=(3),\n input_shape=(1, 512),\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:16 @ 1*128。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n'''\n\nmodel1.add(LSTM(32,return_sequences=True))\nmodel1.add(Flatten()) # 压平:将输出压平为1维\n\n# =============================================================\n\n# ============ ======== 2、 2D部分 ============================\nmodel2 = Sequential()\n# Conv2D:8 @ 32*32。8个过滤器(卷积核),卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=8,\n kernel_size=(3, 3),\n input_shape=input_shape_2D,\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:8 @ 16*16。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n\n# Conv2D:16 @ 16*16。16个过滤器,卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=16,\n kernel_size=(3, 3),\n input_shape=(16, 16, 1),\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:16 @ 8*8。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n\n'''\n# Conv2D:16 @ 8*8。16个过滤器,卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=16,\n kernel_size=(3, 3),\n input_shape=(8, 8, 1),\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:16 @ 4*4。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n'''\nprint(\"model2两层卷积后的输出形状:\",model2.output_shape) # (None,4,4,16)\nmodel2.add(Reshape((64,16))) #(None,16,16)\nmodel2.add(LSTM(32,return_sequences=True))\nmodel2.add(Flatten())\n# =============================================================\n\n\n# 
==================== 3、汇聚层 ===============================\n# 融合部分\nmodel = merge_model(model1, model2)\nmodel.summary()\n# =============================================================\n\nprint(\"model.outputs:\",model.output.shape)\n\n# ============= 4、 全连接层,dropout,分类层 ====================\nmodel = addLayers_model(model)\nprint(model.summary())\n\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\n# =============================================================\n\n# ==================== 5、模型训练指标 ==========================\n# adam优化器, lr:初始学习率为0.1,学习率下降递减采用:ReduceLROnPlateau,在 model.fit 的回调函数中设置\n# adam = keras.optimizers.Adam(lr=0.1)\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n# =============================================================\n\n# 保存模型结构\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Number Guessing Game
import random
#assign secretNumber to a random number from range 1-10, inclusive of 10
secretNumber = random.randint(1, 10)
#initialize number of guesses to 1
numGuesses = 1
#prompt user to enter their name and enter their guess
name = input("Enter your name: ")
print(name)
guess = int(input("Enter a guess: "))
print(guess)
#create a while loop that exits when the guess is equal to the secret number
#if guess is > than the secret number, the user is told the number is less than the guess
#if guess is < than the secret number, the user is told the number is greater than the guess
while guess != secretNumber:
    if guess > secretNumber:
        print("the secret number is less than " + str(guess))
    else:
        print("the secret number is greater than " + str(guess))
    #the number of guesses is incremented by 1
    numGuesses += 1
    #the guess was wrong, so the user is prompted to enter another number
    guess = int(input("Enter a number: "))

print("Zee gives congrats to " + str(name) + "! The number is " + str(secretNumber))
print("It took you " + str(numGuesses) + " guesses. Great job!")
|
normal
|
{
"blob_id": "d2da346e11fa9508cab22a3a2fd3ca57a0a755e6",
"index": 5420,
"step-1": "# Number Guessing Game\nimport random\n#assign secrectNumber to random number from range 1-10, inclusive of 10\nsecrectNumber = random.randint (1, 11)\n#initialize number or guesses to 1 and call it guess\nnumGuesses = 1\n#prompt user to enter their name and enter their guess\nname = int (input(\"Enter your name: ))\n print(\"name\")\nguess = int(input(\"Enter a guess:\"))\n print(\"guess\")\n\n#create a while loop thats exits when the guess is equal to the secrect number\n#guess the the secrectNumber if guess is > than secrect number the user is will recieve an alert that the number is less than guess\n# if guess is < than secrect number the user is will recieve an alert the number isgreater than guess\n #if the guess is not equal to the secrect number, do the while loop.\nwhile (guess != secrectNumber > 5):\n if guess > secrectNumber:\n print (\"the secrect number is less than \"+ str(guess))\n\n else:\n print (\"the secrect number is greater than \" + str(guess))\n numGuesses += 1\n if guess == number:\n break\n #The number of guesses is incremented by +1\n \n #if the guess is not equal to the secrect number, guess again. The user is prompted to ebnter a number\n guess = int (input(\"Enter a number\"))\n\n print (\" Zee gives congrats to\" +str(name) +\"! the number is\" +str(secrectNumber))\n print (\"it took you\" +str(numGuesses) + \"guesses. Great job!\")\n \n\n \n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from aws_cdk import core as cdk
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import (core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam,
aws_ecs_patterns as ecs_patterns)
class kdECSDemo(cdk.Stack):
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
        # Create the VPC and the ECS cluster
        # TODO: even when max_azs is specified, only 2 AZs get deployed
vpc = ec2.Vpc(self, "ECSVPC", cidr='10.0.0.0/16')
cluster = ecs.Cluster(self, "ECSCluster", vpc=vpc)
        # Create the task definition
task_definition = ecs.FargateTaskDefinition(self, "ECSDemoTaskDefinition",
task_role=iam.Role.from_role_arn(self, "fargate_task_role", "arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens"),
execution_role=iam.Role.from_role_arn(self, "fargate_task_execution_role", "arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole")
)
task_definition.add_volume(name="data")
# App Container
app_container = task_definition.add_container(
"AppContainer",
image=ecs.ContainerImage.from_ecr_repository(
ecr.Repository.from_repository_name(self, id="app-file-image", repository_name="app-file")
),
logging=ecs.FireLensLogDriver()
)
app_container.add_mount_points(ecs.MountPoint(
container_path="/data/logs",
read_only=False,
source_volume="data"
))
# app_container.add_port_mappings(ecs.PortMapping(container_port=80))
# Log Router
fluentbit_container = ecs.FirelensLogRouter(self, "fluentbit_container",
firelens_config=ecs.FirelensConfig(
type=ecs.FirelensLogRouterType.FLUENTBIT,
options=ecs.FirelensOptions(
config_file_value="/extra.conf"
)
),
task_definition=task_definition,
image=ecs.ContainerImage.from_ecr_repository(
ecr.Repository.from_repository_name(self, id="log-router", repository_name="firelens-file")
),
logging=ecs.AwsLogDriver(stream_prefix="/ecs/firelens-fluentbit-demo/")
)
fluentbit_container.add_mount_points(ecs.MountPoint(
container_path="/data/logs",
read_only=False,
source_volume="data"
))
        # # Create the service
# ecs_patterns.ApplicationLoadBalancedFargateService(self, "ServiceWithLogging",
# cluster=cluster,
# desired_count=1, # Default is 1
# task_definition=task_definition,
# public_load_balancer=True) # Default is False
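
# A minimal sketch (not in the original) of wiring this stack into a CDK app entry point,
# assuming the usual CDK v1 pattern; the stack id "kd-ecs-demo" is a hypothetical name.
app = core.App()
kdECSDemo(app, "kd-ecs-demo")
app.synth()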
|
normal
|
{
"blob_id": "12cd3dbf211b202d25dc6f940156536c9fe3f76f",
"index": 3385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass kdECSDemo(cdk.Stack):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass kdECSDemo(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs\n ) ->None:\n super().__init__(scope, construct_id, **kwargs)\n vpc = ec2.Vpc(self, 'ECSVPC', cidr='10.0.0.0/16')\n cluster = ecs.Cluster(self, 'ECSCluster', vpc=vpc)\n task_definition = ecs.FargateTaskDefinition(self,\n 'ECSDemoTaskDefinition', task_role=iam.Role.from_role_arn(self,\n 'fargate_task_role',\n 'arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens'),\n execution_role=iam.Role.from_role_arn(self,\n 'fargate_task_execution_role',\n 'arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole'))\n task_definition.add_volume(name='data')\n app_container = task_definition.add_container('AppContainer', image\n =ecs.ContainerImage.from_ecr_repository(ecr.Repository.\n from_repository_name(self, id='app-file-image', repository_name\n ='app-file')), logging=ecs.FireLensLogDriver())\n app_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n fluentbit_container = ecs.FirelensLogRouter(self,\n 'fluentbit_container', firelens_config=ecs.FirelensConfig(type=\n ecs.FirelensLogRouterType.FLUENTBIT, options=ecs.\n FirelensOptions(config_file_value='/extra.conf')),\n task_definition=task_definition, image=ecs.ContainerImage.\n from_ecr_repository(ecr.Repository.from_repository_name(self,\n id='log-router', repository_name='firelens-file')), logging=ecs\n .AwsLogDriver(stream_prefix='/ecs/firelens-fluentbit-demo/'))\n fluentbit_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n",
"step-4": "from aws_cdk import core as cdk\nfrom aws_cdk import core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam, aws_ecs_patterns as ecs_patterns\n\n\nclass kdECSDemo(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs\n ) ->None:\n super().__init__(scope, construct_id, **kwargs)\n vpc = ec2.Vpc(self, 'ECSVPC', cidr='10.0.0.0/16')\n cluster = ecs.Cluster(self, 'ECSCluster', vpc=vpc)\n task_definition = ecs.FargateTaskDefinition(self,\n 'ECSDemoTaskDefinition', task_role=iam.Role.from_role_arn(self,\n 'fargate_task_role',\n 'arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens'),\n execution_role=iam.Role.from_role_arn(self,\n 'fargate_task_execution_role',\n 'arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole'))\n task_definition.add_volume(name='data')\n app_container = task_definition.add_container('AppContainer', image\n =ecs.ContainerImage.from_ecr_repository(ecr.Repository.\n from_repository_name(self, id='app-file-image', repository_name\n ='app-file')), logging=ecs.FireLensLogDriver())\n app_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n fluentbit_container = ecs.FirelensLogRouter(self,\n 'fluentbit_container', firelens_config=ecs.FirelensConfig(type=\n ecs.FirelensLogRouterType.FLUENTBIT, options=ecs.\n FirelensOptions(config_file_value='/extra.conf')),\n task_definition=task_definition, image=ecs.ContainerImage.\n from_ecr_repository(ecr.Repository.from_repository_name(self,\n id='log-router', repository_name='firelens-file')), logging=ecs\n .AwsLogDriver(stream_prefix='/ecs/firelens-fluentbit-demo/'))\n fluentbit_container.add_mount_points(ecs.MountPoint(container_path=\n '/data/logs', read_only=False, source_volume='data'))\n",
"step-5": "from aws_cdk import core as cdk\n\n# For consistency with other languages, `cdk` is the preferred import name for\n# the CDK's core module. The following line also imports it as `core` for use\n# with examples from the CDK Developer's Guide, which are in the process of\n# being updated to use `cdk`. You may delete this import if you don't need it.\nfrom aws_cdk import (core, aws_ec2 as ec2, aws_ecs as ecs, aws_ecr as ecr, aws_iam as iam,\n aws_ecs_patterns as ecs_patterns)\n\n\nclass kdECSDemo(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # 建VPC与ECS Cluster\n # TODO: 即使指定 max_azs, 也只能部署2个AZ\n vpc = ec2.Vpc(self, \"ECSVPC\", cidr='10.0.0.0/16') \n cluster = ecs.Cluster(self, \"ECSCluster\", vpc=vpc)\n\n #建Task Definition\n task_definition = ecs.FargateTaskDefinition(self, \"ECSDemoTaskDefinition\",\n task_role=iam.Role.from_role_arn(self, \"fargate_task_role\", \"arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens\"),\n execution_role=iam.Role.from_role_arn(self, \"fargate_task_execution_role\", \"arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole\")\n )\n\n task_definition.add_volume(name=\"data\")\n\n # App Container\n app_container = task_definition.add_container(\n \"AppContainer\",\n image=ecs.ContainerImage.from_ecr_repository(\n ecr.Repository.from_repository_name(self, id=\"app-file-image\", repository_name=\"app-file\")\n ), \n logging=ecs.FireLensLogDriver()\n )\n\n app_container.add_mount_points(ecs.MountPoint(\n container_path=\"/data/logs\",\n read_only=False,\n source_volume=\"data\"\n ))\n\n # app_container.add_port_mappings(ecs.PortMapping(container_port=80))\n \n # Log Router\n fluentbit_container = ecs.FirelensLogRouter(self, \"fluentbit_container\",\n firelens_config=ecs.FirelensConfig(\n type=ecs.FirelensLogRouterType.FLUENTBIT,\n options=ecs.FirelensOptions(\n config_file_value=\"/extra.conf\"\n )\n ),\n task_definition=task_definition,\n image=ecs.ContainerImage.from_ecr_repository(\n ecr.Repository.from_repository_name(self, id=\"log-router\", repository_name=\"firelens-file\")\n ),\n logging=ecs.AwsLogDriver(stream_prefix=\"/ecs/firelens-fluentbit-demo/\")\n )\n \n fluentbit_container.add_mount_points(ecs.MountPoint(\n container_path=\"/data/logs\",\n read_only=False,\n source_volume=\"data\"\n ))\n\n # #建Service\n # ecs_patterns.ApplicationLoadBalancedFargateService(self, \"ServiceWithLogging\",\n # cluster=cluster,\n # desired_count=1, # Default is 1\n # task_definition=task_definition,\n # public_load_balancer=True) # Default is False\n\n \n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
time.sleep(2)
def write(i):
ser.write(struct.pack('>BBB', 255, 0, i))
write(0)
time.sleep(1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
usbport = '/dev/ttyS3'
ser = serial.Serial(usbport, 9600, timeout=1)
time.sleep(2)
def write(i):
ser.write(struct.pack('>BBB', 255, 0, i))
write(0)
time.sleep(1)
<|reserved_special_token_1|>
import serial
import time
import struct
usbport = '/dev/ttyS3'
ser = serial.Serial(usbport, 9600, timeout=1)
time.sleep(2)
def write(i):
ser.write(struct.pack('>BBB', 255, 0, i))
write(0)
time.sleep(1)
<|reserved_special_token_1|>
import serial
import time
import struct
# Assign Arduino's serial port address
# Windows example
# usbport = 'COM3'
# Linux example
# usbport = '/dev/ttyUSB0'
# MacOSX example
# usbport = '/dev/tty.usbserial-FTALLOK2'
# basically just see what ports are open - >>> ls /dev/tty*
# Set up serial baud rate
usbport = '/dev/ttyS3'
ser = serial.Serial(usbport,9600,timeout=1)
# time.sleep is necessary - it takes some time to open serial port
time.sleep(2)
def write(i):
ser.write(struct.pack('>BBB',255,0,i))
write(0)
time.sleep(1)
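
# A hedged follow-up sketch (not in the original): sweep the value byte through a few
# levels and then close the port. This assumes the Arduino sketch reads the third byte
# as a position/level; adjust the range for the actual device.
for value in range(0, 181, 30):
    write(value)
    time.sleep(0.5)
ser.close()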
|
flexible
|
{
"blob_id": "6c98be473bf4cd458ea8a801f8b1197c9d8a07b3",
"index": 3514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntime.sleep(2)\n\n\ndef write(i):\n ser.write(struct.pack('>BBB', 255, 0, i))\n\n\nwrite(0)\ntime.sleep(1)\n",
"step-3": "<mask token>\nusbport = '/dev/ttyS3'\nser = serial.Serial(usbport, 9600, timeout=1)\ntime.sleep(2)\n\n\ndef write(i):\n ser.write(struct.pack('>BBB', 255, 0, i))\n\n\nwrite(0)\ntime.sleep(1)\n",
"step-4": "import serial\nimport time\nimport struct\nusbport = '/dev/ttyS3'\nser = serial.Serial(usbport, 9600, timeout=1)\ntime.sleep(2)\n\n\ndef write(i):\n ser.write(struct.pack('>BBB', 255, 0, i))\n\n\nwrite(0)\ntime.sleep(1)\n",
"step-5": "import serial\r\nimport time\r\nimport struct\r\n# Assign Arduino's serial port address\r\n# Windows example\r\n# usbport = 'COM3'\r\n# Linux example\r\n# usbport = '/dev/ttyUSB0'\r\n# MacOSX example\r\n# usbport = '/dev/tty.usbserial-FTALLOK2'\r\n# basically just see what ports are open - >>> ls /dev/tty*\r\n\r\n# Set up serial baud rate\r\nusbport = '/dev/ttyS3'\r\n\r\nser = serial.Serial(usbport,9600,timeout=1)\r\n# time.sleep is necessary - it takes some time to open serial port\r\ntime.sleep(2)\r\n\r\ndef write(i):\r\n ser.write(struct.pack('>BBB',255,0,i))\r\n\r\nwrite(0)\r\ntime.sleep(1)\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from abc import ABC, abstractmethod
class Shape(ABC): # Shape is a child class of ABC
@abstractmethod
def area(self):
pass
@abstractmethod
def perimeter(self):
pass
class Square(Shape):
def __init__(self, length):
self.length = length
square = Square(4)
# this code will not run as-is: instantiating Square raises a TypeError because the
# abstract methods area() and perimeter() have not been overridden in the child class, Square
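
# A minimal sketch of the fix (not in the original): override both abstract methods so the
# subclass can be instantiated. Treat it as a standalone snippet, since the Square(4) line
# above already raises before reaching this point.
class ConcreteSquare(Shape):
    def __init__(self, length):
        self.length = length

    def area(self):
        return self.length ** 2

    def perimeter(self):
        return 4 * self.length


concrete = ConcreteSquare(4)
print(concrete.area(), concrete.perimeter())  # 16 16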
|
normal
|
{
"blob_id": "520b9246c3c617b18ca57f31ff51051cc3ff51ca",
"index": 5517,
"step-1": "<mask token>\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n <mask token>\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\nsquare = Square(4)\n",
"step-4": "from abc import ABC, abstractmethod\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\nsquare = Square(4)\n",
"step-5": "from abc import ABC, abstractmethod\n\n\nclass Shape(ABC): # Shape is a child class of ABC\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n def __init__(self, length):\n self.length = length\n\n\nsquare = Square(4)\n# this will code will not compile since abstarct methods have not been\n# defined in the child class, Square\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
get_train_data(sys.argv[1], sys.argv[2])
<|reserved_special_token_1|>
from Classify import get_train_data
import sys
<|reserved_special_token_0|>
get_train_data(sys.argv[1], sys.argv[2])
<|reserved_special_token_1|>
#-*-coding:utf-8-*-
from Classify import get_train_data
import sys
'''
 Fetch the training set data
'''
get_train_data(sys.argv[1], sys.argv[2])
|
flexible
|
{
"blob_id": "513aff6cf29bbce55e2382943767a9a21df2e98e",
"index": 5080,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nget_train_data(sys.argv[1], sys.argv[2])\n",
"step-3": "from Classify import get_train_data\nimport sys\n<mask token>\nget_train_data(sys.argv[1], sys.argv[2])\n",
"step-4": "#-*-coding:utf-8-*-\nfrom Classify import get_train_data\nimport sys\n'''\n 获取训练集数据\n'''\nget_train_data(sys.argv[1], sys.argv[2])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import json
from page import Page
from random import choice
from os.path import join, expanduser
from file_handler import f_read, f_readlines, open_local
import config
class LetterPage(Page):
def __init__(self, page_num,n):
super(LetterPage, self).__init__(page_num)
self.title = "Letters"
self.in_index = False
self.n = n
self.tagline = "Email klbscroggsbot@gmail.com and your letter will appear here"
self.letters = ""
def background(self):
self.letters = f_read("emails")
if config.NAME == "KLBFAX" and self.n==1 and config.has_gmail_login():
import gmail
details = f_readlines("gmail")
g = gmail.login(details[0],details[1])
unread = g.inbox().mail(unread=True)
for mail in unread:
mail.fetch()
lines = "".join(mail.body.split("\r")).split("\n")
if lines[0] == "EVENT" and "matthew.scroggs.14@ucl.ac.uk" in mail.fr:
try:
with open_local('events','a') as f:
for line in lines:
if line!="EVENT":
f.write(line+"\n")
mail.read()
except:
pass
elif lines[0] == "CARD" and "matthew.scroggs.14@ucl.ac.uk" in mail.fr:
with open('/home/pi/cards/'+lines[1],"w") as f:
f.write("\n".join(lines[2:]))
mail.read()
elif "POINTS" in lines[0].upper() and "belgin.seymenoglu.10@ucl.ac.uk" in mail.fr:
from points import add_points
length = 1
points_to_give = 0
while length<=len(lines[2]):
try:
if lines[2][:length]!="-":
points_to_give = int(lines[2][:length])
length += 1
except:
break
add_points(lines[1].split("=")[0],points_to_give)
mail.read()
else:
newletter = ""
for line in lines:
if line!="":
while len(line)>79:
newletter += line[:79]+"\n"
line=line[79:]
newletter+=line+"\n"
self.letters=newletter+"\n"+"from "+mail.fr+"\n\n"+self.letters
mail.read()
self.letters = self.letters.split("\n")
if len(self.letters)>1000:
self.letters = self.letters[:1000]
with open_local("emails","w") as f:
f.write("\n".join(self.letters))
else:
self.letters = self.letters.split("\n")
def generate_content(self):
letters = self.letters[20*(self.n-1):20*self.n]
letters = "\n".join(letters)
try:
letters = unicode(letters,'latin1')
except:
letters = str(letters)
self.add_title("Have your say",font="size4")
a = str(self.n)+"/21"
self.move_cursor(x=90-len(a))
self.add_text(a, fg="BLUE", bg="YELLOW")
self.move_cursor(x=0)
self.start_random_bg_color()
for line in letters.split("\n"):
line = line.rstrip("\n")
if line == "":
self.end_bg_color()
self.start_random_bg_color()
self.add_text(line,fg="BLACK")
self.add_newline()
self.end_bg_color()
if self.n==21:
self.add_text("~ END OF LETTERS ~")
else:
self.add_text("The letters continue on page "+str(200+self.n))
letters_page1 = LetterPage("200",1)
letters_page1.in_index = True
letters_page1.index_num = "200-220"
letters_page2 = LetterPage("201",2)
letters_page3 = LetterPage("202",3)
letters_page4 = LetterPage("203",4)
letters_page5 = LetterPage("204",5)
letters_page6 = LetterPage("205",6)
letters_page7 = LetterPage("206",7)
letters_page8 = LetterPage("207",8)
letters_page9 = LetterPage("208",9)
letters_page10 = LetterPage("209",10)
letters_page11 = LetterPage("210",11)
letters_page12 = LetterPage("211",12)
letters_page13 = LetterPage("212",13)
letters_page14 = LetterPage("213",14)
letters_page15 = LetterPage("214",15)
letters_page16 = LetterPage("215",16)
letters_page17 = LetterPage("216",17)
letters_page18 = LetterPage("217",18)
letters_page19 = LetterPage("218",19)
letters_page20 = LetterPage("219",20)
letters_page21 = LetterPage("220",21)
|
normal
|
{
"blob_id": "e714fe0e27ec9ea5acb3120a4d2114d3d7674fcf",
"index": 5601,
"step-1": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\nletters_page1 = LetterPage('200', 1)\nletters_page1.in_index = True\nletters_page1.index_num = '200-220'\nletters_page2 = LetterPage('201', 2)\nletters_page3 = LetterPage('202', 3)\nletters_page4 = LetterPage('203', 4)\nletters_page5 = LetterPage('204', 5)\nletters_page6 = LetterPage('205', 6)\nletters_page7 = LetterPage('206', 7)\nletters_page8 = LetterPage('207', 8)\nletters_page9 = LetterPage('208', 9)\nletters_page10 = LetterPage('209', 10)\nletters_page11 = LetterPage('210', 11)\nletters_page12 = LetterPage('211', 12)\nletters_page13 = LetterPage('212', 13)\nletters_page14 = LetterPage('213', 14)\nletters_page15 = LetterPage('214', 15)\nletters_page16 = LetterPage('215', 16)\nletters_page17 = LetterPage('216', 17)\nletters_page18 = LetterPage('217', 18)\nletters_page19 = LetterPage('218', 19)\nletters_page20 = LetterPage('219', 20)\nletters_page21 = 
LetterPage('220', 21)\n",
"step-4": "import os\nimport json\nfrom page import Page\nfrom random import choice\nfrom os.path import join, expanduser\nfrom file_handler import f_read, f_readlines, open_local\nimport config\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email klbscroggsbot@gmail.com and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and 'matthew.scroggs.14@ucl.ac.uk' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and 'belgin.seymenoglu.10@ucl.ac.uk' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\nletters_page1 = LetterPage('200', 1)\nletters_page1.in_index = True\nletters_page1.index_num = '200-220'\nletters_page2 = LetterPage('201', 2)\nletters_page3 = LetterPage('202', 3)\nletters_page4 = LetterPage('203', 4)\nletters_page5 = LetterPage('204', 5)\nletters_page6 = LetterPage('205', 6)\nletters_page7 = LetterPage('206', 7)\nletters_page8 = LetterPage('207', 8)\nletters_page9 = LetterPage('208', 9)\nletters_page10 = LetterPage('209', 10)\nletters_page11 = LetterPage('210', 11)\nletters_page12 = LetterPage('211', 12)\nletters_page13 = LetterPage('212', 13)\nletters_page14 = LetterPage('213', 14)\nletters_page15 = LetterPage('214', 15)\nletters_page16 = LetterPage('215', 
16)\nletters_page17 = LetterPage('216', 17)\nletters_page18 = LetterPage('217', 18)\nletters_page19 = LetterPage('218', 19)\nletters_page20 = LetterPage('219', 20)\nletters_page21 = LetterPage('220', 21)\n",
"step-5": "import os\nimport json\nfrom page import Page\nfrom random import choice\nfrom os.path import join, expanduser\nfrom file_handler import f_read, f_readlines, open_local\nimport config\n\nclass LetterPage(Page):\n def __init__(self, page_num,n):\n super(LetterPage, self).__init__(page_num)\n self.title = \"Letters\"\n self.in_index = False\n self.n = n\n self.tagline = \"Email klbscroggsbot@gmail.com and your letter will appear here\"\n self.letters = \"\"\n\n def background(self):\n self.letters = f_read(\"emails\")\n if config.NAME == \"KLBFAX\" and self.n==1 and config.has_gmail_login():\n import gmail\n details = f_readlines(\"gmail\")\n\n g = gmail.login(details[0],details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = \"\".join(mail.body.split(\"\\r\")).split(\"\\n\")\n if lines[0] == \"EVENT\" and \"matthew.scroggs.14@ucl.ac.uk\" in mail.fr:\n try:\n with open_local('events','a') as f:\n for line in lines:\n if line!=\"EVENT\":\n f.write(line+\"\\n\")\n mail.read()\n except:\n pass\n elif lines[0] == \"CARD\" and \"matthew.scroggs.14@ucl.ac.uk\" in mail.fr:\n with open('/home/pi/cards/'+lines[1],\"w\") as f:\n f.write(\"\\n\".join(lines[2:]))\n mail.read()\n elif \"POINTS\" in lines[0].upper() and \"belgin.seymenoglu.10@ucl.ac.uk\" in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length<=len(lines[2]):\n try:\n if lines[2][:length]!=\"-\":\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split(\"=\")[0],points_to_give)\n mail.read()\n \n else:\n newletter = \"\"\n for line in lines:\n if line!=\"\":\n while len(line)>79:\n newletter += line[:79]+\"\\n\"\n line=line[79:]\n newletter+=line+\"\\n\"\n \n self.letters=newletter+\"\\n\"+\"from \"+mail.fr+\"\\n\\n\"+self.letters\n mail.read()\n self.letters = self.letters.split(\"\\n\")\n if len(self.letters)>1000:\n self.letters = self.letters[:1000]\n with open_local(\"emails\",\"w\") as f:\n f.write(\"\\n\".join(self.letters))\n else:\n self.letters = self.letters.split(\"\\n\")\n\n\n def generate_content(self):\n letters = self.letters[20*(self.n-1):20*self.n]\n letters = \"\\n\".join(letters)\n try:\n letters = unicode(letters,'latin1')\n except:\n letters = str(letters)\n\n\n self.add_title(\"Have your say\",font=\"size4\")\n a = str(self.n)+\"/21\"\n self.move_cursor(x=90-len(a))\n self.add_text(a, fg=\"BLUE\", bg=\"YELLOW\")\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split(\"\\n\"):\n line = line.rstrip(\"\\n\")\n if line == \"\":\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line,fg=\"BLACK\")\n self.add_newline()\n self.end_bg_color()\n if self.n==21:\n self.add_text(\"~ END OF LETTERS ~\")\n else:\n self.add_text(\"The letters continue on page \"+str(200+self.n))\n\nletters_page1 = LetterPage(\"200\",1)\nletters_page1.in_index = True\nletters_page1.index_num = \"200-220\"\nletters_page2 = LetterPage(\"201\",2)\nletters_page3 = LetterPage(\"202\",3)\nletters_page4 = LetterPage(\"203\",4)\nletters_page5 = LetterPage(\"204\",5)\nletters_page6 = LetterPage(\"205\",6)\nletters_page7 = LetterPage(\"206\",7)\nletters_page8 = LetterPage(\"207\",8)\nletters_page9 = LetterPage(\"208\",9)\nletters_page10 = LetterPage(\"209\",10)\nletters_page11 = LetterPage(\"210\",11)\nletters_page12 = LetterPage(\"211\",12)\nletters_page13 = LetterPage(\"212\",13)\nletters_page14 = LetterPage(\"213\",14)\nletters_page15 = LetterPage(\"214\",15)\nletters_page16 = 
LetterPage(\"215\",16)\nletters_page17 = LetterPage(\"216\",17)\nletters_page18 = LetterPage(\"217\",18)\nletters_page19 = LetterPage(\"218\",19)\nletters_page20 = LetterPage(\"219\",20)\nletters_page21 = LetterPage(\"220\",21)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def classify(img, c_model):
""" classifies images in a given folder using the 'model'"""
im_size = 128
img = cv2.resize(img, (im_size, im_size))
img = img.astype('float') / 255.0
img = np.expand_dims(img, axis=0)
with class_graph.as_default():
predictions = c_model.predict(img)[0]
return predictions
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global class_graph
def classify(img, c_model):
""" classifies images in a given folder using the 'model'"""
im_size = 128
img = cv2.resize(img, (im_size, im_size))
img = img.astype('float') / 255.0
img = np.expand_dims(img, axis=0)
with class_graph.as_default():
predictions = c_model.predict(img)[0]
return predictions
if __name__ == '__main__':
im_name = 'data/demo/images(1).jpg'
model_path = 'data/credit-card.model'
class_model = load_model(model_path)
class_graph = tf.get_default_graph()
crop_img = cv2.imread(im_name)
predictions = classify(crop_img, class_model)
print(predictions)
<|reserved_special_token_1|>
import pytesseract
from PIL import Image
import tensorflow as tf
from keras.models import load_model
from tensorflow import Graph
import os
import json
import cv2
import numpy as np
global class_graph
def classify(img, c_model):
""" classifies images in a given folder using the 'model'"""
im_size = 128
img = cv2.resize(img, (im_size, im_size))
img = img.astype('float') / 255.0
img = np.expand_dims(img, axis=0)
with class_graph.as_default():
predictions = c_model.predict(img)[0]
return predictions
if __name__ == '__main__':
im_name = 'data/demo/images(1).jpg'
model_path = 'data/credit-card.model'
class_model = load_model(model_path)
class_graph = tf.get_default_graph()
crop_img = cv2.imread(im_name)
predictions = classify(crop_img, class_model)
print(predictions)
<|reserved_special_token_1|>
import pytesseract
from PIL import Image
import tensorflow as tf
from keras.models import load_model
from tensorflow import Graph
import os
import json
import cv2
import numpy as np
global class_graph
def classify(img, c_model):
#global class_graph
""" classifies images in a given folder using the 'model'"""
#img = load_img(im_path,target_size=(input_height, input_width))
#img = img_to_array(img)
im_size = 128
# resize
img = cv2.resize(img, (im_size,im_size))
img = img.astype("float") / 255.0
img = np.expand_dims(img, axis=0)
with class_graph.as_default():
predictions = c_model.predict(img)[0]
return predictions
if __name__ == '__main__':
im_name = "data/demo/images(1).jpg"
# load model
model_path = "data/credit-card.model"
class_model = load_model(model_path)
class_graph=tf.get_default_graph()
crop_img = cv2.imread(im_name)
predictions = classify(crop_img, class_model)
print(predictions)
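
    # A small follow-up sketch (not in the original): report the highest-scoring class index
    # and its score from the prediction vector returned by classify().
    best = int(np.argmax(predictions))
    print('predicted class index:', best, 'score:', float(predictions[best]))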
|
flexible
|
{
"blob_id": "c7d51f6448400af5630bdc0c29493320af88288e",
"index": 7424,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef classify(img, c_model):\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n im_size = 128\n img = cv2.resize(img, (im_size, im_size))\n img = img.astype('float') / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n return predictions\n\n\n<mask token>\n",
"step-3": "<mask token>\nglobal class_graph\n\n\ndef classify(img, c_model):\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n im_size = 128\n img = cv2.resize(img, (im_size, im_size))\n img = img.astype('float') / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n return predictions\n\n\nif __name__ == '__main__':\n im_name = 'data/demo/images(1).jpg'\n model_path = 'data/credit-card.model'\n class_model = load_model(model_path)\n class_graph = tf.get_default_graph()\n crop_img = cv2.imread(im_name)\n predictions = classify(crop_img, class_model)\n print(predictions)\n",
"step-4": "import pytesseract\nfrom PIL import Image\nimport tensorflow as tf\nfrom keras.models import load_model\nfrom tensorflow import Graph\nimport os\nimport json\nimport cv2\nimport numpy as np\nglobal class_graph\n\n\ndef classify(img, c_model):\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n im_size = 128\n img = cv2.resize(img, (im_size, im_size))\n img = img.astype('float') / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n return predictions\n\n\nif __name__ == '__main__':\n im_name = 'data/demo/images(1).jpg'\n model_path = 'data/credit-card.model'\n class_model = load_model(model_path)\n class_graph = tf.get_default_graph()\n crop_img = cv2.imread(im_name)\n predictions = classify(crop_img, class_model)\n print(predictions)\n",
"step-5": "import pytesseract\nfrom PIL import Image\nimport tensorflow as tf\n\nfrom keras.models import load_model\nfrom tensorflow import Graph\n\nimport os\nimport json\nimport cv2\nimport numpy as np\n\nglobal class_graph\n\n\n\n\ndef classify(img, c_model):\n #global class_graph\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n\n #img = load_img(im_path,target_size=(input_height, input_width))\n #img = img_to_array(img)\n im_size = 128\n # resize \n\n img = cv2.resize(img, (im_size,im_size))\n\n img = img.astype(\"float\") / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n\n return predictions\n\nif __name__ == '__main__':\n im_name = \"data/demo/images(1).jpg\"\n # load model\n model_path = \"data/credit-card.model\"\n class_model = load_model(model_path)\n\n class_graph=tf.get_default_graph()\n\n\n crop_img = cv2.imread(im_name)\n\n predictions = classify(crop_img, class_model)\n print(predictions)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge_the_tools(string, k):
if len(string) % k != 0:
exit()
else:
L = []
for i in range(0, len(string), k):
L.append(''.join(list(dict.fromkeys(string[i:i + k]))))
print('\n'.join(L))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge_the_tools(string, k):
if len(string) % k != 0:
exit()
else:
L = []
for i in range(0, len(string), k):
L.append(''.join(list(dict.fromkeys(string[i:i + k]))))
print('\n'.join(L))
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
<|reserved_special_token_1|>
def merge_the_tools(string, k):
if(len(string)%k != 0):
exit()
else:
L = []
for i in range(0, len(string), k):
L.append(''.join(list(dict.fromkeys(string[i:i+k]))))
print('\n'.join(L))
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
# S, N = input(), int(input())
# for part in zip(*[iter(S)] * N):
# asterisk unpacks a list. Example: print(*[1,2,3,4]) = print(1,2,3,4)
# [iter(s)]*n makes a list of n times the same iterator for s.
# Example: [[iter(s)]*3] = ([iter(s), iter(s), iter(s)])
# if s = 'abcdefghi', then zip(*[iter(s)]*3) will have the following effect:
# a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i
# ^ ^ ^
# ^ ^ ^
# ^ ^ ^
# d = dict()
# print(''.join([ d.setdefault(c, c) for c in part if c not in d ]))
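
# A runnable sketch of the zip-based alternative described in the comments above (it assumes,
# like the original, that len(S) is a multiple of N); each chunk is de-duplicated in order.
def merge_the_tools_zip(S, N):
    for part in zip(*[iter(S)] * N):
        d = dict()
        print(''.join([d.setdefault(c, c) for c in part if c not in d]))

# Example: merge_the_tools_zip('AABCAAADA', 3) prints AB, CA, AD on separate lines.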
|
flexible
|
{
"blob_id": "0004e90622f8b13ec7ce0c1f49e8c8df7ea07269",
"index": 7098,
"step-1": "<mask token>\n",
"step-2": "def merge_the_tools(string, k):\n if len(string) % k != 0:\n exit()\n else:\n L = []\n for i in range(0, len(string), k):\n L.append(''.join(list(dict.fromkeys(string[i:i + k]))))\n print('\\n'.join(L))\n\n\n<mask token>\n",
"step-3": "def merge_the_tools(string, k):\n if len(string) % k != 0:\n exit()\n else:\n L = []\n for i in range(0, len(string), k):\n L.append(''.join(list(dict.fromkeys(string[i:i + k]))))\n print('\\n'.join(L))\n\n\nif __name__ == '__main__':\n string, k = input(), int(input())\n merge_the_tools(string, k)\n",
"step-4": "def merge_the_tools(string, k):\n if(len(string)%k != 0):\n exit()\n else:\n L = []\n for i in range(0, len(string), k):\n L.append(''.join(list(dict.fromkeys(string[i:i+k]))))\n print('\\n'.join(L))\n\nif __name__ == '__main__':\n\n string, k = input(), int(input())\n merge_the_tools(string, k)\n\n\n# S, N = input(), int(input())\n# for part in zip(*[iter(S)] * N):\n# asterisk unpacks a list. Example: print(*[1,2,3,4]) = print(1,2,3,4)\n# [iter(s)]*n makes a list of n times the same iterator for s.\n# Example: [[iter(s)]*3] = ([iter(s), iter(s), iter(s)])\n# if s = 'abcdefghi', then zip(*[iter(s)]*3) will have the following effect:\n# a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i\n# ^ ^ ^\n# ^ ^ ^\n# ^ ^ ^\n# d = dict()\n# print(''.join([ d.setdefault(c, c) for c in part if c not in d ]))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def createNewDataFrame():
columns = ['document_id', 'content', 'cat', 'subcat']
df_ = pd.DataFrame(columns=columns)
return df_
def getcategories(foldername):
cats = foldername.split('_')
print('The cats are ', cats, len(cats))
cat = ''
sub = ''
if len(cats) == 1:
cat = cats[0]
sub = ''
if len(cats) == 2:
cat = cats[0]
sub = cats[1]
if len(cats) == 3:
cat = cats[0] + '/' + cats[1]
sub = cats[2]
if len(cats) == 4:
cat = cats[0] + '/' + cats[1]
sub = cats[2] + '/' + cats[3]
return cat, sub
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def createNewDataFrame():
columns = ['document_id', 'content', 'cat', 'subcat']
df_ = pd.DataFrame(columns=columns)
return df_
def getcategories(foldername):
cats = foldername.split('_')
print('The cats are ', cats, len(cats))
cat = ''
sub = ''
if len(cats) == 1:
cat = cats[0]
sub = ''
if len(cats) == 2:
cat = cats[0]
sub = cats[1]
if len(cats) == 3:
cat = cats[0] + '/' + cats[1]
sub = cats[2]
if len(cats) == 4:
cat = cats[0] + '/' + cats[1]
sub = cats[2] + '/' + cats[3]
return cat, sub
global df
<|reserved_special_token_0|>
for item in paths:
pdffolders = glob.glob(item + '/*.pdf_work')
cat, subcat = getcategories(item.split('/')[-2])
for eachpdffolder in pdffolders:
doc_id = eachpdffolder.split('/')[-1].split('.')[0]
textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')
if len(textfile) < 2:
with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:
content = myfile0.read()
else:
with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:
content = myfile.read()
with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:
content = content + myfile2.read()
df = df.append([{'document_id': doc_id, 'content': content, 'cat':
cat, 'subcat': subcat}], ignore_index=True)
df.to_csv('../corpus/Full_corpus_fromClientFolder.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def createNewDataFrame():
columns = ['document_id', 'content', 'cat', 'subcat']
df_ = pd.DataFrame(columns=columns)
return df_
def getcategories(foldername):
cats = foldername.split('_')
print('The cats are ', cats, len(cats))
cat = ''
sub = ''
if len(cats) == 1:
cat = cats[0]
sub = ''
if len(cats) == 2:
cat = cats[0]
sub = cats[1]
if len(cats) == 3:
cat = cats[0] + '/' + cats[1]
sub = cats[2]
if len(cats) == 4:
cat = cats[0] + '/' + cats[1]
sub = cats[2] + '/' + cats[3]
return cat, sub
global df
df = createNewDataFrame()
clientFolder = '/home/medilenz/OCR_Process/Firm_logic_july_03/'
paths = glob.glob(clientFolder + '*/')
for item in paths:
pdffolders = glob.glob(item + '/*.pdf_work')
cat, subcat = getcategories(item.split('/')[-2])
for eachpdffolder in pdffolders:
doc_id = eachpdffolder.split('/')[-1].split('.')[0]
textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')
if len(textfile) < 2:
with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:
content = myfile0.read()
else:
with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:
content = myfile.read()
with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:
content = content + myfile2.read()
df = df.append([{'document_id': doc_id, 'content': content, 'cat':
cat, 'subcat': subcat}], ignore_index=True)
df.to_csv('../corpus/Full_corpus_fromClientFolder.csv')
<|reserved_special_token_1|>
import pandas as pd
import os, re, sys
import numpy as np
import glob as glob
def createNewDataFrame():
columns = ['document_id', 'content', 'cat', 'subcat']
df_ = pd.DataFrame(columns=columns)
return df_
def getcategories(foldername):
cats = foldername.split('_')
print('The cats are ', cats, len(cats))
cat = ''
sub = ''
if len(cats) == 1:
cat = cats[0]
sub = ''
if len(cats) == 2:
cat = cats[0]
sub = cats[1]
if len(cats) == 3:
cat = cats[0] + '/' + cats[1]
sub = cats[2]
if len(cats) == 4:
cat = cats[0] + '/' + cats[1]
sub = cats[2] + '/' + cats[3]
return cat, sub
global df
df = createNewDataFrame()
clientFolder = '/home/medilenz/OCR_Process/Firm_logic_july_03/'
paths = glob.glob(clientFolder + '*/')
for item in paths:
pdffolders = glob.glob(item + '/*.pdf_work')
cat, subcat = getcategories(item.split('/')[-2])
for eachpdffolder in pdffolders:
doc_id = eachpdffolder.split('/')[-1].split('.')[0]
textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')
if len(textfile) < 2:
with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:
content = myfile0.read()
else:
with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:
content = myfile.read()
with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:
content = content + myfile2.read()
df = df.append([{'document_id': doc_id, 'content': content, 'cat':
cat, 'subcat': subcat}], ignore_index=True)
df.to_csv('../corpus/Full_corpus_fromClientFolder.csv')
<|reserved_special_token_1|>
# coding: utf-8
# In[1]:
import pandas as pd
import os,re,sys
import numpy as np
import glob as glob
# In[2]:
def createNewDataFrame():
columns = ['document_id','content','cat','subcat']
df_ = pd.DataFrame(columns=columns)
return(df_)
# In[3]:
def getcategories(foldername):
cats = foldername.split('_')
print("The cats are ", cats,len(cats))
cat =''
sub = ''
if (len(cats) == 1):
cat = cats[0]
sub = ''
if (len(cats) == 2):
cat = cats[0]
sub = cats[1]
if(len(cats) == 3):
cat = cats[0]+'/'+cats[1]
sub = cats[2]
if(len(cats) == 4):
cat = cats[0]+'/'+cats[1]
sub = cats[2]+'/'+cats[3]
return(cat,sub)
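
# A quick illustration of the folder-name convention handled above (hypothetical folder names):
#   getcategories('Billing')               -> ('Billing', '')
#   getcategories('Billing_Invoice')       -> ('Billing', 'Invoice')
#   getcategories('Legal_Notes_Discharge') -> ('Legal/Notes', 'Discharge')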
# In[4]:
global df
df = createNewDataFrame()
clientFolder='/home/medilenz/OCR_Process/Firm_logic_july_03/'
paths = glob.glob(clientFolder+'*/')
for item in paths:
pdffolders = glob.glob(item+'/*.pdf_work')
#print("THe item is ", item)
cat,subcat = getcategories(item.split('/')[-2])
for eachpdffolder in pdffolders:
doc_id=eachpdffolder.split('/')[-1].split('.')[0]
textfile = glob.glob(eachpdffolder+'page_*[^_6].txt')
if(len(textfile) < 2):
with open(eachpdffolder+'/page_0001.txt', 'r') as myfile0:
content = myfile0.read()
else :
with open(eachpdffolder+'/page_0001.txt', 'r') as myfile:
content = myfile.read()
with open(eachpdffolder+'/page_0002.txt', 'r') as myfile2:
content = content + myfile2.read()
df = df.append([{'document_id':doc_id, 'content':content,'cat':cat, 'subcat': subcat}],ignore_index=True)
df.to_csv("../corpus/Full_corpus_fromClientFolder.csv")
|
flexible
|
{
"blob_id": "1aa01845ab98005b1fee33b4fc153bb029e450e0",
"index": 2061,
"step-1": "<mask token>\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\nglobal df\n<mask token>\nfor item in paths:\n pdffolders = glob.glob(item + '/*.pdf_work')\n cat, subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id = eachpdffolder.split('/')[-1].split('.')[0]\n textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')\n if len(textfile) < 2:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:\n content = myfile0.read()\n else:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n df = df.append([{'document_id': doc_id, 'content': content, 'cat':\n cat, 'subcat': subcat}], ignore_index=True)\ndf.to_csv('../corpus/Full_corpus_fromClientFolder.csv')\n",
"step-3": "<mask token>\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\nglobal df\ndf = createNewDataFrame()\nclientFolder = '/home/medilenz/OCR_Process/Firm_logic_july_03/'\npaths = glob.glob(clientFolder + '*/')\nfor item in paths:\n pdffolders = glob.glob(item + '/*.pdf_work')\n cat, subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id = eachpdffolder.split('/')[-1].split('.')[0]\n textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')\n if len(textfile) < 2:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:\n content = myfile0.read()\n else:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n df = df.append([{'document_id': doc_id, 'content': content, 'cat':\n cat, 'subcat': subcat}], ignore_index=True)\ndf.to_csv('../corpus/Full_corpus_fromClientFolder.csv')\n",
"step-4": "import pandas as pd\nimport os, re, sys\nimport numpy as np\nimport glob as glob\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\nglobal df\ndf = createNewDataFrame()\nclientFolder = '/home/medilenz/OCR_Process/Firm_logic_july_03/'\npaths = glob.glob(clientFolder + '*/')\nfor item in paths:\n pdffolders = glob.glob(item + '/*.pdf_work')\n cat, subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id = eachpdffolder.split('/')[-1].split('.')[0]\n textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')\n if len(textfile) < 2:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:\n content = myfile0.read()\n else:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n df = df.append([{'document_id': doc_id, 'content': content, 'cat':\n cat, 'subcat': subcat}], ignore_index=True)\ndf.to_csv('../corpus/Full_corpus_fromClientFolder.csv')\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport os,re,sys\nimport numpy as np\nimport glob as glob\n\n\n# In[2]:\n\ndef createNewDataFrame():\n \n columns = ['document_id','content','cat','subcat']\n df_ = pd.DataFrame(columns=columns)\n return(df_)\n\n\n# In[3]:\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print(\"The cats are \", cats,len(cats))\n cat =''\n sub = '' \n if (len(cats) == 1):\n cat = cats[0]\n sub = ''\n if (len(cats) == 2):\n cat = cats[0]\n sub = cats[1]\n if(len(cats) == 3):\n cat = cats[0]+'/'+cats[1]\n sub = cats[2]\n if(len(cats) == 4):\n cat = cats[0]+'/'+cats[1]\n sub = cats[2]+'/'+cats[3]\n \n return(cat,sub) \n\n\n# In[4]:\n\nglobal df\ndf = createNewDataFrame()\n\nclientFolder='/home/medilenz/OCR_Process/Firm_logic_july_03/'\n\npaths = glob.glob(clientFolder+'*/')\nfor item in paths:\n pdffolders = glob.glob(item+'/*.pdf_work') \n #print(\"THe item is \", item)\n cat,subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id=eachpdffolder.split('/')[-1].split('.')[0] \n textfile = glob.glob(eachpdffolder+'page_*[^_6].txt') \n if(len(textfile) < 2):\n with open(eachpdffolder+'/page_0001.txt', 'r') as myfile0:\n content = myfile0.read() \n else :\n with open(eachpdffolder+'/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder+'/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n \n df = df.append([{'document_id':doc_id, 'content':content,'cat':cat, 'subcat': subcat}],ignore_index=True) \n\n\ndf.to_csv(\"../corpus/Full_corpus_fromClientFolder.csv\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import sys
import numpy as np
####################################################################################################
### These functions all perform QA checks on input files.
### These should catch many errors, but are not exhaustive. 
####################################################################################################
####################################################################################################
def check_usage(subpuc_names,year,subpuc_usage):
if len(subpuc_names) == len(subpuc_usage[1:]):
pass
else: sys.exit('There is an issue with your subpuc_usage.csv file. Number of sub-PUC(s) is incorrect.')
year_available = 0
for i in range(len(subpuc_usage[0,1:])):
if subpuc_usage[0,1+i] == year:
year_available = 1
else: pass
if year_available == 0:
sys.exit('There is an issue with your subpuc_usage.csv file. '+str(year)+' is missing.')
else: pass
####################################################################################################
####################################################################################################
def check_usetime(subpuc_names,subpuc_usetime):
if len(subpuc_names) == len(subpuc_usetime):
pass
else: sys.exit('There is an issue with your subpuc_usetimescales.csv file. Number of sub-PUC(s) is incorrect.')
for i in range(len(subpuc_usetime)):
if subpuc_usetime[i,1] >= 0.0 and subpuc_usetime[i,1] <= 6.0:
pass
else: sys.exit('There is a bounds issue in your subpuc_usetimescales.csv files.')
####################################################################################################
####################################################################################################
def check_controls(subpuc_names,subpuc_controls):
if len(subpuc_names) == len(subpuc_controls):
pass
else: sys.exit('There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.')
for i in range(len(subpuc_controls)):
if subpuc_controls[i,1] >= 0.0 and subpuc_controls[i,1] <= 1.0:
pass
else: sys.exit('There is a bounds issue in your subpuc_controls.csv files.')
####################################################################################################
####################################################################################################
def check_1st_order_spec(subpuc_names,first_ord_spec):
if len(subpuc_names) == len(first_ord_spec):
pass
else: sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.')
for i in range(len(first_ord_spec)):
if np.sum(first_ord_spec[i,0:3]) >= 0.99 and np.sum(first_ord_spec[i,0:3]) <= 1.01:
pass
else: sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.')
if first_ord_spec[i,2] >= first_ord_spec[i,3]:
pass
else: sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. TOG > Organic.')
for j in range(len(first_ord_spec[0,:])):
if first_ord_spec[i,j] >= 0.0 and first_ord_spec[i,j] <= 1.0:
pass
else: sys.exit('There is a bounds issue in your subpuc_1st_order_speciation.csv files.')
####################################################################################################
####################################################################################################
def check_organic_spec(subpuc_names,organic_spec,chem_index):
if len(subpuc_names) == len(organic_spec[0,:]):
pass
else: sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.')
for i in range(len(organic_spec[0,:])):
if np.nansum(organic_spec[1:,i]) >= 0.99 and np.nansum(organic_spec[1:,i]) <= 1.01:
pass
else: sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.')
if len(chem_index) == len(organic_spec[1:,0]):
pass
else: sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.')
####################################################################################################
####################################################################################################
def check_chem_assignments(chem_props_vars,chem_props_strs,chem_index):
if len(chem_index) == len(chem_props_vars) and len(chem_index) == len(chem_props_strs):
pass
else: sys.exit('There is an issue with your chemical_assignments.csv file. Number of species is incorrect.')
####################################################################################################
|
normal
|
{
"blob_id": "7413c06a990894c34ee5174d84f0e3bd20abf51f",
"index": 3294,
"step-1": "<mask token>\n\n\ndef check_controls(subpuc_names, subpuc_controls):\n if len(subpuc_names) == len(subpuc_controls):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_controls)):\n if subpuc_controls[i, 1] >= 0.0 and subpuc_controls[i, 1] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_controls.csv files.')\n\n\ndef check_1st_order_spec(subpuc_names, first_ord_spec):\n if len(subpuc_names) == len(first_ord_spec):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(first_ord_spec)):\n if np.sum(first_ord_spec[i, 0:3]) >= 0.99 and np.sum(first_ord_spec\n [i, 0:3]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.'\n )\n if first_ord_spec[i, 2] >= first_ord_spec[i, 3]:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. TOG > Organic.'\n )\n for j in range(len(first_ord_spec[0, :])):\n if first_ord_spec[i, j] >= 0.0 and first_ord_spec[i, j] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_1st_order_speciation.csv files.'\n )\n\n\ndef check_organic_spec(subpuc_names, organic_spec, chem_index):\n if len(subpuc_names) == len(organic_spec[0, :]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(organic_spec[0, :])):\n if np.nansum(organic_spec[1:, i]) >= 0.99 and np.nansum(organic_spec\n [1:, i]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.'\n )\n if len(chem_index) == len(organic_spec[1:, 0]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.'\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_usetime(subpuc_names, subpuc_usetime):\n if len(subpuc_names) == len(subpuc_usetime):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_usetimescales.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_usetime)):\n if subpuc_usetime[i, 1] >= 0.0 and subpuc_usetime[i, 1] <= 6.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_usetimescales.csv files.'\n )\n\n\ndef check_controls(subpuc_names, subpuc_controls):\n if len(subpuc_names) == len(subpuc_controls):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_controls)):\n if subpuc_controls[i, 1] >= 0.0 and subpuc_controls[i, 1] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_controls.csv files.')\n\n\ndef check_1st_order_spec(subpuc_names, first_ord_spec):\n if len(subpuc_names) == len(first_ord_spec):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(first_ord_spec)):\n if np.sum(first_ord_spec[i, 0:3]) >= 0.99 and np.sum(first_ord_spec\n [i, 0:3]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.'\n )\n if first_ord_spec[i, 2] >= first_ord_spec[i, 3]:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. TOG > Organic.'\n )\n for j in range(len(first_ord_spec[0, :])):\n if first_ord_spec[i, j] >= 0.0 and first_ord_spec[i, j] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_1st_order_speciation.csv files.'\n )\n\n\ndef check_organic_spec(subpuc_names, organic_spec, chem_index):\n if len(subpuc_names) == len(organic_spec[0, :]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(organic_spec[0, :])):\n if np.nansum(organic_spec[1:, i]) >= 0.99 and np.nansum(organic_spec\n [1:, i]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.'\n )\n if len(chem_index) == len(organic_spec[1:, 0]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.'\n )\n\n\ndef check_chem_assignments(chem_props_vars, chem_props_strs, chem_index):\n if len(chem_index) == len(chem_props_vars) and len(chem_index) == len(\n chem_props_strs):\n pass\n else:\n sys.exit(\n 'There is an issue with your chemical_assignments.csv file. Number of species is incorrect.'\n )\n",
"step-3": "<mask token>\n\n\ndef check_usage(subpuc_names, year, subpuc_usage):\n if len(subpuc_names) == len(subpuc_usage[1:]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_usage.csv file. Number of sub-PUC(s) is incorrect.'\n )\n year_available = 0\n for i in range(len(subpuc_usage[0, 1:])):\n if subpuc_usage[0, 1 + i] == year:\n year_available = 1\n else:\n pass\n if year_available == 0:\n sys.exit('There is an issue with your subpuc_usage.csv file. ' +\n str(year) + ' is missing.')\n else:\n pass\n\n\ndef check_usetime(subpuc_names, subpuc_usetime):\n if len(subpuc_names) == len(subpuc_usetime):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_usetimescales.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_usetime)):\n if subpuc_usetime[i, 1] >= 0.0 and subpuc_usetime[i, 1] <= 6.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_usetimescales.csv files.'\n )\n\n\ndef check_controls(subpuc_names, subpuc_controls):\n if len(subpuc_names) == len(subpuc_controls):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_controls)):\n if subpuc_controls[i, 1] >= 0.0 and subpuc_controls[i, 1] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_controls.csv files.')\n\n\ndef check_1st_order_spec(subpuc_names, first_ord_spec):\n if len(subpuc_names) == len(first_ord_spec):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(first_ord_spec)):\n if np.sum(first_ord_spec[i, 0:3]) >= 0.99 and np.sum(first_ord_spec\n [i, 0:3]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.'\n )\n if first_ord_spec[i, 2] >= first_ord_spec[i, 3]:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. TOG > Organic.'\n )\n for j in range(len(first_ord_spec[0, :])):\n if first_ord_spec[i, j] >= 0.0 and first_ord_spec[i, j] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_1st_order_speciation.csv files.'\n )\n\n\ndef check_organic_spec(subpuc_names, organic_spec, chem_index):\n if len(subpuc_names) == len(organic_spec[0, :]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(organic_spec[0, :])):\n if np.nansum(organic_spec[1:, i]) >= 0.99 and np.nansum(organic_spec\n [1:, i]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.'\n )\n if len(chem_index) == len(organic_spec[1:, 0]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.'\n )\n\n\ndef check_chem_assignments(chem_props_vars, chem_props_strs, chem_index):\n if len(chem_index) == len(chem_props_vars) and len(chem_index) == len(\n chem_props_strs):\n pass\n else:\n sys.exit(\n 'There is an issue with your chemical_assignments.csv file. Number of species is incorrect.'\n )\n",
"step-4": "import sys\nimport numpy as np\n\n\ndef check_usage(subpuc_names, year, subpuc_usage):\n if len(subpuc_names) == len(subpuc_usage[1:]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_usage.csv file. Number of sub-PUC(s) is incorrect.'\n )\n year_available = 0\n for i in range(len(subpuc_usage[0, 1:])):\n if subpuc_usage[0, 1 + i] == year:\n year_available = 1\n else:\n pass\n if year_available == 0:\n sys.exit('There is an issue with your subpuc_usage.csv file. ' +\n str(year) + ' is missing.')\n else:\n pass\n\n\ndef check_usetime(subpuc_names, subpuc_usetime):\n if len(subpuc_names) == len(subpuc_usetime):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_usetimescales.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_usetime)):\n if subpuc_usetime[i, 1] >= 0.0 and subpuc_usetime[i, 1] <= 6.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_usetimescales.csv files.'\n )\n\n\ndef check_controls(subpuc_names, subpuc_controls):\n if len(subpuc_names) == len(subpuc_controls):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(subpuc_controls)):\n if subpuc_controls[i, 1] >= 0.0 and subpuc_controls[i, 1] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_controls.csv files.')\n\n\ndef check_1st_order_spec(subpuc_names, first_ord_spec):\n if len(subpuc_names) == len(first_ord_spec):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(first_ord_spec)):\n if np.sum(first_ord_spec[i, 0:3]) >= 0.99 and np.sum(first_ord_spec\n [i, 0:3]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.'\n )\n if first_ord_spec[i, 2] >= first_ord_spec[i, 3]:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_1st_order_speciation.csv file. TOG > Organic.'\n )\n for j in range(len(first_ord_spec[0, :])):\n if first_ord_spec[i, j] >= 0.0 and first_ord_spec[i, j] <= 1.0:\n pass\n else:\n sys.exit(\n 'There is a bounds issue in your subpuc_1st_order_speciation.csv files.'\n )\n\n\ndef check_organic_spec(subpuc_names, organic_spec, chem_index):\n if len(subpuc_names) == len(organic_spec[0, :]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.'\n )\n for i in range(len(organic_spec[0, :])):\n if np.nansum(organic_spec[1:, i]) >= 0.99 and np.nansum(organic_spec\n [1:, i]) <= 1.01:\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.'\n )\n if len(chem_index) == len(organic_spec[1:, 0]):\n pass\n else:\n sys.exit(\n 'There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.'\n )\n\n\ndef check_chem_assignments(chem_props_vars, chem_props_strs, chem_index):\n if len(chem_index) == len(chem_props_vars) and len(chem_index) == len(\n chem_props_strs):\n pass\n else:\n sys.exit(\n 'There is an issue with your chemical_assignments.csv file. Number of species is incorrect.'\n )\n",
"step-5": "import sys\nimport numpy as np\n\n####################################################################################################\n### These functions all perform QA checks on input files. \n### These should catch many errors, but is not exhaustive. \n####################################################################################################\n\n####################################################################################################\ndef check_usage(subpuc_names,year,subpuc_usage):\n if len(subpuc_names) == len(subpuc_usage[1:]):\n pass\n else: sys.exit('There is an issue with your subpuc_usage.csv file. Number of sub-PUC(s) is incorrect.')\n year_available = 0\n for i in range(len(subpuc_usage[0,1:])):\n if subpuc_usage[0,1+i] == year:\n year_available = 1\n else: pass\n if year_available == 0:\n sys.exit('There is an issue with your subpuc_usage.csv file. '+str(year)+' is missing.')\n else: pass\n####################################################################################################\n\n####################################################################################################\ndef check_usetime(subpuc_names,subpuc_usetime):\n if len(subpuc_names) == len(subpuc_usetime):\n pass\n else: sys.exit('There is an issue with your subpuc_usetimescales.csv file. Number of sub-PUC(s) is incorrect.')\n for i in range(len(subpuc_usetime)):\n if subpuc_usetime[i,1] >= 0.0 and subpuc_usetime[i,1] <= 6.0:\n pass\n else: sys.exit('There is a bounds issue in your subpuc_usetimescales.csv files.')\n####################################################################################################\n\n####################################################################################################\ndef check_controls(subpuc_names,subpuc_controls):\n if len(subpuc_names) == len(subpuc_controls):\n pass\n else: sys.exit('There is an issue with your subpuc_controls.csv file. Number of sub-PUC(s) is incorrect.')\n for i in range(len(subpuc_controls)):\n if subpuc_controls[i,1] >= 0.0 and subpuc_controls[i,1] <= 1.0:\n pass\n else: sys.exit('There is a bounds issue in your subpuc_controls.csv files.')\n####################################################################################################\n\n####################################################################################################\ndef check_1st_order_spec(subpuc_names,first_ord_spec):\n if len(subpuc_names) == len(first_ord_spec):\n pass\n else: sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. Number of sub-PUC(s) is incorrect.')\n for i in range(len(first_ord_spec)):\n if np.sum(first_ord_spec[i,0:3]) >= 0.99 and np.sum(first_ord_spec[i,0:3]) <= 1.01:\n pass\n else: sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. Water + Inorganic + Organic out of bounds.')\n if first_ord_spec[i,2] >= first_ord_spec[i,3]:\n pass\n else: sys.exit('There is an issue with your subpuc_1st_order_speciation.csv file. 
TOG > Organic.')\n for j in range(len(first_ord_spec[0,:])):\n if first_ord_spec[i,j] >= 0.0 and first_ord_spec[i,j] <= 1.0:\n pass\n else: sys.exit('There is a bounds issue in your subpuc_1st_order_speciation.csv files.')\n####################################################################################################\n\n####################################################################################################\ndef check_organic_spec(subpuc_names,organic_spec,chem_index):\n if len(subpuc_names) == len(organic_spec[0,:]):\n pass\n else: sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Number of sub-PUC(s) is incorrect.')\n for i in range(len(organic_spec[0,:])):\n if np.nansum(organic_spec[1:,i]) >= 0.99 and np.nansum(organic_spec[1:,i]) <= 1.01:\n pass\n else: sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Total speciation out of bounds.')\n if len(chem_index) == len(organic_spec[1:,0]):\n pass\n else: sys.exit('There is an issue with your subpuc_organic_speciation.csv file. Number of species is incorrect.')\n####################################################################################################\n\n####################################################################################################\ndef check_chem_assignments(chem_props_vars,chem_props_strs,chem_index):\n if len(chem_index) == len(chem_props_vars) and len(chem_index) == len(chem_props_strs):\n pass\n else: sys.exit('There is an issue with your chemical_assignments.csv file. Number of species is incorrect.')\n####################################################################################################",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IndexView(View):
<|reserved_special_token_0|>
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IndexView(View):
def dispatch_request(self):
return render_template('index.html')
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
<|reserved_special_token_1|>
from flask import render_template, request, Response
from flask.views import MethodView, View
from flask.views import View
from repo import ClassifierRepo
from services import PredictDigitService
from settings import CLASSIFIER_STORAGE
class IndexView(View):
def dispatch_request(self):
return render_template('index.html')
class PredictDigitView(MethodView):
def post(self):
repo = ClassifierRepo(CLASSIFIER_STORAGE)
service = PredictDigitService(repo)
image_data_uri = request.json['image']
prediction = service.handle(image_data_uri)
return Response(str(prediction).encode(), status=200)
|
flexible
|
{
"blob_id": "3ea42e7ad5301314a39bf522280c084342cd18c5",
"index": 332,
"step-1": "<mask token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-2": "<mask token>\n\n\nclass IndexView(View):\n <mask token>\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-3": "<mask token>\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-4": "from flask import render_template, request, Response\nfrom flask.views import MethodView, View\nfrom flask.views import View\nfrom repo import ClassifierRepo\nfrom services import PredictDigitService\nfrom settings import CLASSIFIER_STORAGE\n\n\nclass IndexView(View):\n\n def dispatch_request(self):\n return render_template('index.html')\n\n\nclass PredictDigitView(MethodView):\n\n def post(self):\n repo = ClassifierRepo(CLASSIFIER_STORAGE)\n service = PredictDigitService(repo)\n image_data_uri = request.json['image']\n prediction = service.handle(image_data_uri)\n return Response(str(prediction).encode(), status=200)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
""" main entry point for module execution
"""
argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=
dict(aliases=['commands'], type='list'), parents=dict(type='list'),
before=dict(type='list'), after=dict(type='list'), match=dict(
default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
running_config=dict(aliases=['config']), intended_config=dict(),
defaults=dict(type='bool', default=False), backup=dict(type='bool',
default=False), save_when=dict(choices=['always', 'never',
'modified'], default='never'), diff_against=dict(choices=['running',
'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=
dict(default=False, type='bool', removed_in_version='2.4'), force=
dict(default=False, type='bool', removed_in_version='2.2'))
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents',
'src'), ('save', 'save_when')]
required_if = [('match', 'strict', ['lines']), ('match', 'exact', [
'lines']), ('replace', 'block', ['lines']), ('replace', 'config', [
'replace_src']), ('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=
mutually_exclusive, required_if=required_if, supports_check_mode=True)
warnings = list()
nxos_check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config = None
info = get_capabilities(module).get('device_info', {})
os_platform = info.get('network_os_platform', '')
if module.params['replace'] == 'config':
if '9K' not in os_platform:
module.fail_json(msg=
'replace: config is supported only for Nexus 9K series switches'
)
if module.params['replace_src']:
if module.params['replace'] != 'config':
module.fail_json(msg='replace: config is required with replace_src'
)
if module.params['backup'] or module._diff and module.params['diff_against'
] == 'running':
contents = get_config(module)
config = NetworkConfig(indent=2, contents=contents)
if module.params['backup']:
result['__backup__'] = contents
if any((module.params['src'], module.params['lines'], module.params[
'replace_src'])):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
config = get_running_config(module, config)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=
replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
running_config = None
startup_config = None
diff_ignore_lines = module.params['diff_ignore_lines']
if module.params['save']:
module.params['save_when'] = 'always'
if module.params['save_when'] != 'never':
output = execute_show_commands(module, ['show running-config',
'show startup-config'])
running_config = NetworkConfig(indent=1, contents=output[0],
ignore_lines=diff_ignore_lines)
startup_config = NetworkConfig(indent=1, contents=output[1],
ignore_lines=diff_ignore_lines)
if running_config.sha1 != startup_config.sha1 or module.params[
'save_when'] == 'always':
result['changed'] = True
if not module.check_mode:
cmd = {'command': 'copy running-config startup-config',
'output': 'text'}
run_commands(module, [cmd])
else:
module.warn(
'Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage'
)
if module._diff:
if not running_config:
output = execute_show_commands(module, 'show running-config')
contents = output[0]
else:
contents = running_config.config_text
running_config = NetworkConfig(indent=1, contents=contents,
ignore_lines=diff_ignore_lines)
if module.params['diff_against'] == 'running':
if module.check_mode:
module.warn(
'unable to perform diff against running-config due to check mode'
)
contents = None
else:
contents = config.config_text
elif module.params['diff_against'] == 'startup':
if not startup_config:
output = execute_show_commands(module, 'show startup-config')
contents = output[0]
else:
contents = output[0]
contents = startup_config.config_text
elif module.params['diff_against'] == 'intended':
contents = module.params['intended_config']
if contents is not None:
base_config = NetworkConfig(indent=1, contents=contents,
ignore_lines=diff_ignore_lines)
if running_config.sha1 != base_config.sha1:
if module.params['diff_against'] == 'intended':
before = running_config
after = base_config
elif module.params['diff_against'] in ('startup', 'running'):
before = base_config
after = running_config
result.update({'changed': True, 'diff': {'before': str(
before), 'after': str(after)}})
module.exit_json(**result)
<|reserved_special_token_1|>
def main():
' main entry point for module execution\n '
argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block', 'config']), running_config=dict(aliases=['config']), intended_config=dict(), defaults=dict(type='bool', default=False), backup=dict(type='bool', default=False), save_when=dict(choices=['always', 'never', 'modified'], default='never'), diff_against=dict(choices=['running', 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=dict(default=False, type='bool', removed_in_version='2.4'), force=dict(default=False, type='bool', removed_in_version='2.2'))
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents', 'src'), ('save', 'save_when')]
required_if = [('match', 'strict', ['lines']), ('match', 'exact', ['lines']), ('replace', 'block', ['lines']), ('replace', 'config', ['replace_src']), ('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True)
warnings = list()
nxos_check_args(module, warnings)
result = {
'changed': False,
'warnings': warnings,
}
config = None
info = get_capabilities(module).get('device_info', {
})
os_platform = info.get('network_os_platform', '')
if (module.params['replace'] == 'config'):
if ('9K' not in os_platform):
module.fail_json(msg='replace: config is supported only for Nexus 9K series switches')
if module.params['replace_src']:
if (module.params['replace'] != 'config'):
module.fail_json(msg='replace: config is required with replace_src')
if (module.params['backup'] or (module._diff and (module.params['diff_against'] == 'running'))):
contents = get_config(module)
config = NetworkConfig(indent=2, contents=contents)
if module.params['backup']:
result['__backup__'] = contents
if any((module.params['src'], module.params['lines'], module.params['replace_src'])):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if ((match != 'none') and (replace != 'config')):
config = get_running_config(module, config)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if (not module.check_mode):
load_config(module, commands)
result['changed'] = True
running_config = None
startup_config = None
diff_ignore_lines = module.params['diff_ignore_lines']
if module.params['save']:
module.params['save_when'] = 'always'
if (module.params['save_when'] != 'never'):
output = execute_show_commands(module, ['show running-config', 'show startup-config'])
running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)
startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)
if ((running_config.sha1 != startup_config.sha1) or (module.params['save_when'] == 'always')):
result['changed'] = True
if (not module.check_mode):
cmd = {
'command': 'copy running-config startup-config',
'output': 'text',
}
run_commands(module, [cmd])
else:
module.warn('Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage')
if module._diff:
if (not running_config):
output = execute_show_commands(module, 'show running-config')
contents = output[0]
else:
contents = running_config.config_text
running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if (module.params['diff_against'] == 'running'):
if module.check_mode:
module.warn('unable to perform diff against running-config due to check mode')
contents = None
else:
contents = config.config_text
elif (module.params['diff_against'] == 'startup'):
if (not startup_config):
output = execute_show_commands(module, 'show startup-config')
contents = output[0]
else:
contents = output[0]
contents = startup_config.config_text
elif (module.params['diff_against'] == 'intended'):
contents = module.params['intended_config']
if (contents is not None):
base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if (running_config.sha1 != base_config.sha1):
if (module.params['diff_against'] == 'intended'):
before = running_config
after = base_config
elif (module.params['diff_against'] in ('startup', 'running')):
before = base_config
after = running_config
result.update({
'changed': True,
'diff': {
'before': str(before),
'after': str(after),
},
})
module.exit_json(**result)
|
flexible
|
{
"blob_id": "99b5ac74da95dff399c31d58e19bac65e538a34b",
"index": 8012,
"step-1": "<mask token>\n",
"step-2": "def main():\n \"\"\" main entry point for module execution\n \"\"\"\n argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=\n dict(aliases=['commands'], type='list'), parents=dict(type='list'),\n before=dict(type='list'), after=dict(type='list'), match=dict(\n default='line', choices=['line', 'strict', 'exact', 'none']),\n replace=dict(default='line', choices=['line', 'block', 'config']),\n running_config=dict(aliases=['config']), intended_config=dict(),\n defaults=dict(type='bool', default=False), backup=dict(type='bool',\n default=False), save_when=dict(choices=['always', 'never',\n 'modified'], default='never'), diff_against=dict(choices=['running',\n 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=\n dict(default=False, type='bool', removed_in_version='2.4'), force=\n dict(default=False, type='bool', removed_in_version='2.2'))\n argument_spec.update(nxos_argument_spec)\n mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents',\n 'src'), ('save', 'save_when')]\n required_if = [('match', 'strict', ['lines']), ('match', 'exact', [\n 'lines']), ('replace', 'block', ['lines']), ('replace', 'config', [\n 'replace_src']), ('diff_against', 'intended', ['intended_config'])]\n module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=\n mutually_exclusive, required_if=required_if, supports_check_mode=True)\n warnings = list()\n nxos_check_args(module, warnings)\n result = {'changed': False, 'warnings': warnings}\n config = None\n info = get_capabilities(module).get('device_info', {})\n os_platform = info.get('network_os_platform', '')\n if module.params['replace'] == 'config':\n if '9K' not in os_platform:\n module.fail_json(msg=\n 'replace: config is supported only for Nexus 9K series switches'\n )\n if module.params['replace_src']:\n if module.params['replace'] != 'config':\n module.fail_json(msg='replace: config is required with replace_src'\n )\n if module.params['backup'] or module._diff and module.params['diff_against'\n ] == 'running':\n contents = get_config(module)\n config = NetworkConfig(indent=2, contents=contents)\n if module.params['backup']:\n result['__backup__'] = contents\n if any((module.params['src'], module.params['lines'], module.params[\n 'replace_src'])):\n match = module.params['match']\n replace = module.params['replace']\n candidate = get_candidate(module)\n if match != 'none' and replace != 'config':\n config = get_running_config(module, config)\n path = module.params['parents']\n configobjs = candidate.difference(config, match=match, replace=\n replace, path=path)\n else:\n configobjs = candidate.items\n if configobjs:\n commands = dumps(configobjs, 'commands').split('\\n')\n if module.params['before']:\n commands[:0] = module.params['before']\n if module.params['after']:\n commands.extend(module.params['after'])\n result['commands'] = commands\n result['updates'] = commands\n if not module.check_mode:\n load_config(module, commands)\n result['changed'] = True\n running_config = None\n startup_config = None\n diff_ignore_lines = module.params['diff_ignore_lines']\n if module.params['save']:\n module.params['save_when'] = 'always'\n if module.params['save_when'] != 'never':\n output = execute_show_commands(module, ['show running-config',\n 'show startup-config'])\n running_config = NetworkConfig(indent=1, contents=output[0],\n ignore_lines=diff_ignore_lines)\n startup_config = NetworkConfig(indent=1, contents=output[1],\n ignore_lines=diff_ignore_lines)\n if running_config.sha1 != 
startup_config.sha1 or module.params[\n 'save_when'] == 'always':\n result['changed'] = True\n if not module.check_mode:\n cmd = {'command': 'copy running-config startup-config',\n 'output': 'text'}\n run_commands(module, [cmd])\n else:\n module.warn(\n 'Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage'\n )\n if module._diff:\n if not running_config:\n output = execute_show_commands(module, 'show running-config')\n contents = output[0]\n else:\n contents = running_config.config_text\n running_config = NetworkConfig(indent=1, contents=contents,\n ignore_lines=diff_ignore_lines)\n if module.params['diff_against'] == 'running':\n if module.check_mode:\n module.warn(\n 'unable to perform diff against running-config due to check mode'\n )\n contents = None\n else:\n contents = config.config_text\n elif module.params['diff_against'] == 'startup':\n if not startup_config:\n output = execute_show_commands(module, 'show startup-config')\n contents = output[0]\n else:\n contents = output[0]\n contents = startup_config.config_text\n elif module.params['diff_against'] == 'intended':\n contents = module.params['intended_config']\n if contents is not None:\n base_config = NetworkConfig(indent=1, contents=contents,\n ignore_lines=diff_ignore_lines)\n if running_config.sha1 != base_config.sha1:\n if module.params['diff_against'] == 'intended':\n before = running_config\n after = base_config\n elif module.params['diff_against'] in ('startup', 'running'):\n before = base_config\n after = running_config\n result.update({'changed': True, 'diff': {'before': str(\n before), 'after': str(after)}})\n module.exit_json(**result)\n",
"step-3": "def main():\n ' main entry point for module execution\\n '\n argument_spec = dict(src=dict(type='path'), replace_src=dict(), lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block', 'config']), running_config=dict(aliases=['config']), intended_config=dict(), defaults=dict(type='bool', default=False), backup=dict(type='bool', default=False), save_when=dict(choices=['always', 'never', 'modified'], default='never'), diff_against=dict(choices=['running', 'startup', 'intended']), diff_ignore_lines=dict(type='list'), save=dict(default=False, type='bool', removed_in_version='2.4'), force=dict(default=False, type='bool', removed_in_version='2.2'))\n argument_spec.update(nxos_argument_spec)\n mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents', 'src'), ('save', 'save_when')]\n required_if = [('match', 'strict', ['lines']), ('match', 'exact', ['lines']), ('replace', 'block', ['lines']), ('replace', 'config', ['replace_src']), ('diff_against', 'intended', ['intended_config'])]\n module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True)\n warnings = list()\n nxos_check_args(module, warnings)\n result = {\n 'changed': False,\n 'warnings': warnings,\n }\n config = None\n info = get_capabilities(module).get('device_info', {\n \n })\n os_platform = info.get('network_os_platform', '')\n if (module.params['replace'] == 'config'):\n if ('9K' not in os_platform):\n module.fail_json(msg='replace: config is supported only for Nexus 9K series switches')\n if module.params['replace_src']:\n if (module.params['replace'] != 'config'):\n module.fail_json(msg='replace: config is required with replace_src')\n if (module.params['backup'] or (module._diff and (module.params['diff_against'] == 'running'))):\n contents = get_config(module)\n config = NetworkConfig(indent=2, contents=contents)\n if module.params['backup']:\n result['__backup__'] = contents\n if any((module.params['src'], module.params['lines'], module.params['replace_src'])):\n match = module.params['match']\n replace = module.params['replace']\n candidate = get_candidate(module)\n if ((match != 'none') and (replace != 'config')):\n config = get_running_config(module, config)\n path = module.params['parents']\n configobjs = candidate.difference(config, match=match, replace=replace, path=path)\n else:\n configobjs = candidate.items\n if configobjs:\n commands = dumps(configobjs, 'commands').split('\\n')\n if module.params['before']:\n commands[:0] = module.params['before']\n if module.params['after']:\n commands.extend(module.params['after'])\n result['commands'] = commands\n result['updates'] = commands\n if (not module.check_mode):\n load_config(module, commands)\n result['changed'] = True\n running_config = None\n startup_config = None\n diff_ignore_lines = module.params['diff_ignore_lines']\n if module.params['save']:\n module.params['save_when'] = 'always'\n if (module.params['save_when'] != 'never'):\n output = execute_show_commands(module, ['show running-config', 'show startup-config'])\n running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)\n startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)\n if ((running_config.sha1 != startup_config.sha1) or 
(module.params['save_when'] == 'always')):\n result['changed'] = True\n if (not module.check_mode):\n cmd = {\n 'command': 'copy running-config startup-config',\n 'output': 'text',\n }\n run_commands(module, [cmd])\n else:\n module.warn('Skipping command `copy running-config startup-config` due to check_mode. Configuration not copied to non-volatile storage')\n if module._diff:\n if (not running_config):\n output = execute_show_commands(module, 'show running-config')\n contents = output[0]\n else:\n contents = running_config.config_text\n running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n if (module.params['diff_against'] == 'running'):\n if module.check_mode:\n module.warn('unable to perform diff against running-config due to check mode')\n contents = None\n else:\n contents = config.config_text\n elif (module.params['diff_against'] == 'startup'):\n if (not startup_config):\n output = execute_show_commands(module, 'show startup-config')\n contents = output[0]\n else:\n contents = output[0]\n contents = startup_config.config_text\n elif (module.params['diff_against'] == 'intended'):\n contents = module.params['intended_config']\n if (contents is not None):\n base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)\n if (running_config.sha1 != base_config.sha1):\n if (module.params['diff_against'] == 'intended'):\n before = running_config\n after = base_config\n elif (module.params['diff_against'] in ('startup', 'running')):\n before = base_config\n after = running_config\n result.update({\n 'changed': True,\n 'diff': {\n 'before': str(before),\n 'after': str(after),\n },\n })\n module.exit_json(**result)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import random
import csv
# Extract random questions: pair same-class questions as positive examples and different-class questions as negative examples, positive:negative = 1:3
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('final_syn_train.csv', 'r') as zhidao:
reader = csv.reader(zhidao)
cluster = []
cur = []
stand = ''
        # Group the random questions that share the same standard question into one array
for line in reader:
if line[1] == stand:
cur.append(line[0])
else:
if cur:
cluster.append(cur)
stand = line[1]
cur = [line[0]]
cluster.append(cur)
        # For each sentence in each class, take one item from the same-class array as a positive example and 3 items from other-class arrays as negative examples
for i in range(len(cluster)):
for j in range(len(cluster[i])):
k = random.randint(0, len(cluster[i])-1)
writer.writerow([cluster[i][j], cluster[i][k], 1])
m = n = 0
for _ in range(3):
while m == i:
m = random.randint(0, len(cluster)-1)
n = random.randint(0, len(cluster[m])-1)
writer.writerow([cluster[i][j], cluster[m][n], 0])
# Extract random questions: pair each with its correct standard question as a positive example and with incorrect standard questions as negative examples, positive:negative = 1:3 (this method works better)
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('standard.csv', 'r') as standard:
reader = csv.reader(standard)
stand = []
for line in reader:
stand.append(line[0])
with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
reader = csv.reader(zhidao)
for line in reader:
writer.writerow([line[0], line[1], 1])
for _ in range(3):
k = random.randint(0, 208)
writer.writerow([line[0], stand[k], 0])
|
normal
|
{
"blob_id": "3a09cbd71d23b1320af9b8ddcfc65b223e487b21",
"index": 1811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('final_syn_train.csv', 'r') as zhidao:\n reader = csv.reader(zhidao)\n cluster = []\n cur = []\n stand = ''\n for line in reader:\n if line[1] == stand:\n cur.append(line[0])\n else:\n if cur:\n cluster.append(cur)\n stand = line[1]\n cur = [line[0]]\n cluster.append(cur)\n for i in range(len(cluster)):\n for j in range(len(cluster[i])):\n k = random.randint(0, len(cluster[i]) - 1)\n writer.writerow([cluster[i][j], cluster[i][k], 1])\n m = n = 0\n for _ in range(3):\n while m == i:\n m = random.randint(0, len(cluster) - 1)\n n = random.randint(0, len(cluster[m]) - 1)\n writer.writerow([cluster[i][j], cluster[m][n], 0])\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('standard.csv', 'r') as standard:\n reader = csv.reader(standard)\n stand = []\n for line in reader:\n stand.append(line[0])\n with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:\n reader = csv.reader(zhidao)\n for line in reader:\n writer.writerow([line[0], line[1], 1])\n for _ in range(3):\n k = random.randint(0, 208)\n writer.writerow([line[0], stand[k], 0])\n",
"step-3": "import random\nimport csv\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('final_syn_train.csv', 'r') as zhidao:\n reader = csv.reader(zhidao)\n cluster = []\n cur = []\n stand = ''\n for line in reader:\n if line[1] == stand:\n cur.append(line[0])\n else:\n if cur:\n cluster.append(cur)\n stand = line[1]\n cur = [line[0]]\n cluster.append(cur)\n for i in range(len(cluster)):\n for j in range(len(cluster[i])):\n k = random.randint(0, len(cluster[i]) - 1)\n writer.writerow([cluster[i][j], cluster[i][k], 1])\n m = n = 0\n for _ in range(3):\n while m == i:\n m = random.randint(0, len(cluster) - 1)\n n = random.randint(0, len(cluster[m]) - 1)\n writer.writerow([cluster[i][j], cluster[m][n], 0])\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('standard.csv', 'r') as standard:\n reader = csv.reader(standard)\n stand = []\n for line in reader:\n stand.append(line[0])\n with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:\n reader = csv.reader(zhidao)\n for line in reader:\n writer.writerow([line[0], line[1], 1])\n for _ in range(3):\n k = random.randint(0, 208)\n writer.writerow([line[0], stand[k], 0])\n",
"step-4": "import random\r\nimport csv\r\n\r\n\r\n# 提取随机问,同类组成正例,异类组成负例,正:负=1:3\r\nwith open('final_regroup.csv', 'w', newline='') as train:\r\n writer = csv.writer(train)\r\n with open('final_syn_train.csv', 'r') as zhidao:\r\n reader = csv.reader(zhidao)\r\n cluster = []\r\n cur = []\r\n stand = ''\r\n # 将同一标准问的随机问组成一个数组\r\n for line in reader:\r\n if line[1] == stand:\r\n cur.append(line[0])\r\n else:\r\n if cur:\r\n cluster.append(cur)\r\n stand = line[1]\r\n cur = [line[0]]\r\n cluster.append(cur)\r\n\r\n # 遍历每个分类中的每个句子,在同类数组中取一条数据组成正例,在异类数组中取3条数据组成反例\r\n for i in range(len(cluster)):\r\n for j in range(len(cluster[i])):\r\n k = random.randint(0, len(cluster[i])-1)\r\n writer.writerow([cluster[i][j], cluster[i][k], 1])\r\n m = n = 0\r\n for _ in range(3):\r\n while m == i:\r\n m = random.randint(0, len(cluster)-1)\r\n n = random.randint(0, len(cluster[m])-1)\r\n writer.writerow([cluster[i][j], cluster[m][n], 0])\r\n\r\n\r\n# 提取随机问,与正确标准问组成正例,与非正确标准问组成负例,正:负=1:3 (此方法效果更好)\r\nwith open('final_regroup.csv', 'w', newline='') as train:\r\n writer = csv.writer(train)\r\n with open('standard.csv', 'r') as standard:\r\n reader = csv.reader(standard)\r\n stand = []\r\n for line in reader:\r\n stand.append(line[0])\r\n with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:\r\n reader = csv.reader(zhidao)\r\n for line in reader:\r\n writer.writerow([line[0], line[1], 1])\r\n for _ in range(3):\r\n k = random.randint(0, 208)\r\n writer.writerow([line[0], stand[k], 0])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('crawl').setLevel(logging.INFO)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
es = Elasticsearch()
crawl.crawl_domain(es, 'aaronparecki.com')
<|reserved_special_token_1|>
import crawl
import logging
from elasticsearch import Elasticsearch
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('crawl').setLevel(logging.INFO)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
es = Elasticsearch()
crawl.crawl_domain(es, 'aaronparecki.com')
<|reserved_special_token_1|>
#!/usr/bin/env python
import crawl
import logging
from elasticsearch import Elasticsearch
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger("crawl").setLevel(logging.INFO)
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
es = Elasticsearch()
crawl.crawl_domain(es, "aaronparecki.com")
|
flexible
|
{
"blob_id": "21d07c2b80aa00d0c75da342d37195b6829593b6",
"index": 1110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('crawl').setLevel(logging.INFO)\n logging.getLogger('elasticsearch').setLevel(logging.ERROR)\n es = Elasticsearch()\n crawl.crawl_domain(es, 'aaronparecki.com')\n",
"step-3": "import crawl\nimport logging\nfrom elasticsearch import Elasticsearch\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('crawl').setLevel(logging.INFO)\n logging.getLogger('elasticsearch').setLevel(logging.ERROR)\n es = Elasticsearch()\n crawl.crawl_domain(es, 'aaronparecki.com')\n",
"step-4": "#!/usr/bin/env python \nimport crawl\nimport logging\nfrom elasticsearch import Elasticsearch\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger(\"crawl\").setLevel(logging.INFO)\n logging.getLogger(\"elasticsearch\").setLevel(logging.ERROR)\n \n es = Elasticsearch()\n crawl.crawl_domain(es, \"aaronparecki.com\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start()
<|reserved_special_token_1|>
from adventurelib import *
from horror import *
from dating import *
from popquiz import *
from comedy import *
from island import *
start()
|
flexible
|
{
"blob_id": "8a37299154aded37147e1650cbf52a5cdf7d91da",
"index": 4225,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstart()\n",
"step-3": "from adventurelib import *\nfrom horror import *\nfrom dating import *\nfrom popquiz import *\nfrom comedy import *\nfrom island import *\nstart()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for threshold in range(1, 6):
rolls = np.random.randint(1, 7, size=10 ** 7)
rerolls = np.random.randint(1, 7, size=10 ** 7)
avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))
print(
f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
for threshold in range(1, 6):
rolls = np.random.randint(1, 7, size=10 ** 7)
rerolls = np.random.randint(1, 7, size=10 ** 7)
avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))
print(
f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'
)
<|reserved_special_token_1|>
'''
You're playing casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll otherwise, you earn the amount equal to the number on your first roll.
Assuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win?
This question was asked in a data scientist interview at Tinder.
'''
import numpy as np
for threshold in range(1, 6):
rolls = np.random.randint(1, 7, size=10**7)
rerolls = np.random.randint(1, 7, size=10**7)
avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))
print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')
|
flexible
|
{
"blob_id": "e5d704541acd0f68a7885d7323118e1552e064c9",
"index": 6170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10 ** 7)\n rerolls = np.random.randint(1, 7, size=10 ** 7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(\n f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'\n )\n",
"step-3": "<mask token>\nimport numpy as np\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10 ** 7)\n rerolls = np.random.randint(1, 7, size=10 ** 7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(\n f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.'\n )\n",
"step-4": "'''\nYou're playing casino dice game. You roll a die once. If you reroll, you earn the amount equal to the number on your second roll otherwise, you earn the amount equal to the number on your first roll.\n\nAssuming you adopt a profit-maximizing strategy, what would be the expected amount of money you would win?\n\nThis question was asked in a data scientist interview at Tinder.\n'''\n\nimport numpy as np\n\nfor threshold in range(1, 6):\n rolls = np.random.randint(1, 7, size=10**7)\n rerolls = np.random.randint(1, 7, size=10**7)\n avg_roll = np.mean(np.where(rolls <= threshold, rerolls, rolls))\n print(f'Rerolling all {threshold}s and below yields an average roll of {avg_roll}.')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
# Register your models here.
from .models import Participant
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [
("Personal information", {'fields': ['email', 'name', 'institution', 'assistant']}),
("Asistance", {'fields': ['assistant', 'participant_hash']}),
("Contribution", {'fields': ['contribution', 'title', 'abstract', 'link']}),
]
list_display = ('email', 'name', 'assistant', 'contribution', 'title')
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
|
normal
|
{
"blob_id": "c43b899234ffff09225153dcaf097591c7176430",
"index": 841,
"step-1": "<mask token>\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information', {'fields': ['email', 'name',\n 'institution', 'assistant']}), ('Asistance', {'fields': [\n 'assistant', 'participant_hash']}), ('Contribution', {'fields': [\n 'contribution', 'title', 'abstract', 'link']})]\n list_display = 'email', 'name', 'assistant', 'contribution', 'title'\n list_filter = ['assistant', 'contribution']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information', {'fields': ['email', 'name',\n 'institution', 'assistant']}), ('Asistance', {'fields': [\n 'assistant', 'participant_hash']}), ('Contribution', {'fields': [\n 'contribution', 'title', 'abstract', 'link']})]\n list_display = 'email', 'name', 'assistant', 'contribution', 'title'\n list_filter = ['assistant', 'contribution']\n\n\nadmin.site.register(Participant, ParticipantAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Participant\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information', {'fields': ['email', 'name',\n 'institution', 'assistant']}), ('Asistance', {'fields': [\n 'assistant', 'participant_hash']}), ('Contribution', {'fields': [\n 'contribution', 'title', 'abstract', 'link']})]\n list_display = 'email', 'name', 'assistant', 'contribution', 'title'\n list_filter = ['assistant', 'contribution']\n\n\nadmin.site.register(Participant, ParticipantAdmin)\n",
"step-5": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Participant\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [\n (\"Personal information\", {'fields': ['email', 'name', 'institution', 'assistant']}),\n (\"Asistance\", {'fields': ['assistant', 'participant_hash']}),\n (\"Contribution\", {'fields': ['contribution', 'title', 'abstract', 'link']}),\n ]\n list_display = ('email', 'name', 'assistant', 'contribution', 'title')\n list_filter = ['assistant', 'contribution']\n\nadmin.site.register(Participant, ParticipantAdmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import subprocess
import sys
import time
# print sys.argv
start = time.time()
subprocess.call(sys.argv[1:], shell=True)
stop = time.time()
print "\nTook %.1f seconds" % (stop - start)
|
normal
|
{
"blob_id": "530ec3df27cc4c8f0798566f0c66cfbffe510786",
"index": 8611,
"step-1": "import os\r\nimport subprocess\r\nimport sys\r\nimport time\r\n\r\n# print sys.argv\r\nstart = time.time()\r\nsubprocess.call(sys.argv[1:], shell=True)\r\nstop = time.time()\r\nprint \"\\nTook %.1f seconds\" % (stop - start)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ActionWeather(Action):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ActionWeather(Action):
<|reserved_special_token_0|>
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('department')
if loc == 'algology':
buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1')]
elif loc == 'brain and neurosurgery':
buttons = [Button(title='Doç. Dr. Gülşah Bademci', payload=
'/btn1'), Button(title='Doç. Dr. Suat CANBAY', payload='/btn2')
]
elif loc == 'child hematology':
buttons = [Button(title='Prof. Dr. Hatice Emel Özyürek',
payload='/btn1')]
elif loc == 'child nephrology':
buttons = [Button(title='Prof. Dr. Süleyman Kalman', payload=
'/btn1')]
elif loc == 'child health and illness':
buttons = [Button(title='Prof. Dr. Musa Kazım Çağlar', payload=
'/btn1'), Button(title='Prof. Dr. Süleyman Kalman', payload
='/btn2'), Button(title='Prof. Dr. Hatice Emel Özyürek',
payload='/btn3'), Button(title=
'Yar. Doç. Dr. Pakize Elif Alkışn', payload='/btn4'),
Button(title='Uzm. Dr. Mustafa Yücel Kızıltan', payload=
'/btn5'), Button(title='Uzm. Dr. Gökalp Başbozkurt',
payload='/btn6'), Button(title='Uzm. Dr. Hafsa Uçur',
payload='/btn7'), Button(title='Uzm. Dr. Hüsniye Altan',
payload='/btn8'), Button(title='Uzm. Dr. Sarkhan Elbayıyev',
payload='/btn9'), Button(title='Uzm. Dr. Shahın Guliyev',
payload='/btn10')]
elif loc == 'dermatology':
buttons = [Button(title='Uzm. Dr. Aylin Gözübüyükoğulları',
payload='/Dr1'), Button(title='Uzm. Dr. Yeşim Akpınar Kara',
payload='/Dr2')]
elif loc == 'diet policlinic':
buttons = [Button(title='Uzm. Dyt. Gaye Başkurt', payload=
'/Dr1'), Button(title='Dyt. Deniz Özdemir', payload='/Dr2'),
Button(title='Dyt. Halime Besler', payload='/Dr3')]
elif loc == 'endocrinology':
buttons = [Button(title='Prof. Dr. Serdar Güler', payload='/Dr1')]
elif loc == 'infectious diseases':
buttons = [Button(title='Uzm. Dr. Mine Işık Arıgün', payload=
'/Dr1')]
elif loc == 'physical therapy and rehabilitation':
buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1'),
Button(title='Uzm. Dr. Beril Özturan', payload='/Dr2')]
elif loc == 'gastroenterology':
buttons = [Button(title='Doç. Dr. Reskan Altun', payload='/Dr1'
), Button(title='Doç. Dr. Yasemin Özderin Özin', payload=
'/Dr2')]
elif loc == 'general surgery':
buttons = [Button(title='Prof. Dr. Mehmet Mahir Özmen', payload
='/Dr1'), Button(title='Yar. Doç. Dr. Cem Emir Güldoğan',
payload='/Dr2'), Button(title='Yar. Doç. Dr. Emre Gündoğdu',
payload='/Dr3')]
elif loc == 'chest diseases':
buttons = [Button(title='Prof. Dr. Uğur Gönüllü', payload='/Dr1')]
elif loc == 'eye diseases':
buttons = [Button(title='Op. Dr. Samim Özdeş', payload='/Dr1')]
elif loc == 'hematology policlinic':
buttons = [Button(title='Prof. Dr. Oral Nevruz', payload='/Dr1')]
elif loc == 'internal diseases':
buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1'),
Button(title='Uzm. Dr. Sercan Cansaran', payload='/Dr2'),
Button(title='Uzm. Dr. Sevgi Karabuğa', payload='/Dr3'),
Button(title='Yar. Doç. Dr. Gökhan Celbek', payload='/Dr4')]
elif loc == 'gynecology and Obstetrics':
buttons = [Button(title='Yar. Doç. Dr. Müberra Namlı Kalem',
payload='/Dr1'), Button(title='Yar. Doç. Dr. Coşkun Şimşir',
payload='/Dr2'), Button(title='Prof. Dr. Ali Ergün',
payload='/Dr3'), Button(title='Doç. Dr. Korhan Kahraman',
payload='/Dr4'), Button(title='Doç. Dr. Turgut Var',
payload='/Dr5'), Button(title=
'Doç. Dr. Türkan Örnek Gülpınar', payload='/Dr6'), Button(
title='Op. Dr. Aslı Yücetürk', payload='/Dr7'), Button(
title='Op. Dr. Ebru Yüce', payload='/Dr8'), Button(title=
'Prof. Dr. Timur Gürgan', payload='/Dr9')]
elif loc == 'cardiac surgery':
buttons = [Button(title='Prof. Dr. Erol Şener', payload='/Dr1'),
Button(title='Yar. Doç. Dr. Emre Boysan', payload='/Dr2'),
Button(title='Yar. Doç. Renda Cırcı', payload='/Dr3')]
elif loc == 'cardiology':
buttons = [Button(title='Prof. Dr. Erdoğan İlkay', payload=
'/Dr1'), Button(title='Doç. Dr. Alper Canbay', payload=
'/Dr2'), Button(title='Uzm. Dr. Çiğdem Koca Tarı', payload=
'/Dr3'), Button(title='Uzm. Dr. Erol Kalender', payload='/Dr4')
]
elif loc == 'ENT diseases':
buttons = [Button(title='Prof. Dr. Ali Altuntaş', payload=
'/Dr1'), Button(title='Prof. Dr. Serdar Karahatay', payload
='/Dr2'), Button(title='Yar. Doç Dr. Canset Aydın', payload
='/Dr3')]
elif loc == 'nephrology':
buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1')]
elif loc == 'neurology':
buttons = [Button(title='Prof. Dr. Mehmet Zülküf Önal', payload
='/Dr1'), Button(title='Yar. Doç. Dr. Akçay Övünç Ozon',
payload='/Dr2')]
elif loc == 'orthopedics and traumatology':
buttons = [Button(title='Yar. Doç. Dr. Uğur Gönç', payload=
'/Dr1'), Button(title='Op. Dr. Mesut Atabek', payload=
'/Dr2'), Button(title='Prof. Dr. levent Çelebi', payload=
'/Dr3')]
elif loc == 'plastic surgery':
buttons = [Button(title='Op. Dr. Ergin Işık', payload='/Dr1'),
Button(title='Op. Dr. Serdar Düzgün', payload='/Dr2')]
elif loc == 'psychiatry':
buttons = [Button(title='Prof. Dr. Ali Bozkurt', payload='/Dr1')]
elif loc == 'psychologist':
buttons = [Button(title='Psk. Ezgi Kılınç', payload='/Dr1')]
elif loc == 'rheumatology':
buttons = [Button(title='Doç. Dr. Orhan Küçükşahin', payload=
'/Dr1')]
elif loc == 'medical oncology':
buttons = [Button(title='Prof. Dr. Fikret Arpacı', payload=
'/Dr1'), Button(title='Doç. Dr. Gökhan Erdem', payload='/Dr2')]
elif loc == 'urology':
response = 'Müsait doktor bulunmamaktadır...'
response = ''
dispatcher.utter_button_message('my message', buttons)
return [SlotSet('doctor', response)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ActionWeather(Action):
def name(self):
return 'action_doctor'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('department')
if loc == 'algology':
buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1')]
elif loc == 'brain and neurosurgery':
buttons = [Button(title='Doç. Dr. Gülşah Bademci', payload=
'/btn1'), Button(title='Doç. Dr. Suat CANBAY', payload='/btn2')
]
elif loc == 'child hematology':
buttons = [Button(title='Prof. Dr. Hatice Emel Özyürek',
payload='/btn1')]
elif loc == 'child nephrology':
buttons = [Button(title='Prof. Dr. Süleyman Kalman', payload=
'/btn1')]
elif loc == 'child health and illness':
buttons = [Button(title='Prof. Dr. Musa Kazım Çağlar', payload=
'/btn1'), Button(title='Prof. Dr. Süleyman Kalman', payload
='/btn2'), Button(title='Prof. Dr. Hatice Emel Özyürek',
payload='/btn3'), Button(title=
'Yar. Doç. Dr. Pakize Elif Alkışn', payload='/btn4'),
Button(title='Uzm. Dr. Mustafa Yücel Kızıltan', payload=
'/btn5'), Button(title='Uzm. Dr. Gökalp Başbozkurt',
payload='/btn6'), Button(title='Uzm. Dr. Hafsa Uçur',
payload='/btn7'), Button(title='Uzm. Dr. Hüsniye Altan',
payload='/btn8'), Button(title='Uzm. Dr. Sarkhan Elbayıyev',
payload='/btn9'), Button(title='Uzm. Dr. Shahın Guliyev',
payload='/btn10')]
elif loc == 'dermatology':
buttons = [Button(title='Uzm. Dr. Aylin Gözübüyükoğulları',
payload='/Dr1'), Button(title='Uzm. Dr. Yeşim Akpınar Kara',
payload='/Dr2')]
elif loc == 'diet policlinic':
buttons = [Button(title='Uzm. Dyt. Gaye Başkurt', payload=
'/Dr1'), Button(title='Dyt. Deniz Özdemir', payload='/Dr2'),
Button(title='Dyt. Halime Besler', payload='/Dr3')]
elif loc == 'endocrinology':
buttons = [Button(title='Prof. Dr. Serdar Güler', payload='/Dr1')]
elif loc == 'infectious diseases':
buttons = [Button(title='Uzm. Dr. Mine Işık Arıgün', payload=
'/Dr1')]
elif loc == 'physical therapy and rehabilitation':
buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1'),
Button(title='Uzm. Dr. Beril Özturan', payload='/Dr2')]
elif loc == 'gastroenterology':
buttons = [Button(title='Doç. Dr. Reskan Altun', payload='/Dr1'
), Button(title='Doç. Dr. Yasemin Özderin Özin', payload=
'/Dr2')]
elif loc == 'general surgery':
buttons = [Button(title='Prof. Dr. Mehmet Mahir Özmen', payload
='/Dr1'), Button(title='Yar. Doç. Dr. Cem Emir Güldoğan',
payload='/Dr2'), Button(title='Yar. Doç. Dr. Emre Gündoğdu',
payload='/Dr3')]
elif loc == 'chest diseases':
buttons = [Button(title='Prof. Dr. Uğur Gönüllü', payload='/Dr1')]
elif loc == 'eye diseases':
buttons = [Button(title='Op. Dr. Samim Özdeş', payload='/Dr1')]
elif loc == 'hematology policlinic':
buttons = [Button(title='Prof. Dr. Oral Nevruz', payload='/Dr1')]
elif loc == 'internal diseases':
buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1'),
Button(title='Uzm. Dr. Sercan Cansaran', payload='/Dr2'),
Button(title='Uzm. Dr. Sevgi Karabuğa', payload='/Dr3'),
Button(title='Yar. Doç. Dr. Gökhan Celbek', payload='/Dr4')]
elif loc == 'gynecology and Obstetrics':
buttons = [Button(title='Yar. Doç. Dr. Müberra Namlı Kalem',
payload='/Dr1'), Button(title='Yar. Doç. Dr. Coşkun Şimşir',
payload='/Dr2'), Button(title='Prof. Dr. Ali Ergün',
payload='/Dr3'), Button(title='Doç. Dr. Korhan Kahraman',
payload='/Dr4'), Button(title='Doç. Dr. Turgut Var',
payload='/Dr5'), Button(title=
'Doç. Dr. Türkan Örnek Gülpınar', payload='/Dr6'), Button(
title='Op. Dr. Aslı Yücetürk', payload='/Dr7'), Button(
title='Op. Dr. Ebru Yüce', payload='/Dr8'), Button(title=
'Prof. Dr. Timur Gürgan', payload='/Dr9')]
elif loc == 'cardiac surgery':
buttons = [Button(title='Prof. Dr. Erol Şener', payload='/Dr1'),
Button(title='Yar. Doç. Dr. Emre Boysan', payload='/Dr2'),
Button(title='Yar. Doç. Renda Cırcı', payload='/Dr3')]
elif loc == 'cardiology':
buttons = [Button(title='Prof. Dr. Erdoğan İlkay', payload=
'/Dr1'), Button(title='Doç. Dr. Alper Canbay', payload=
'/Dr2'), Button(title='Uzm. Dr. Çiğdem Koca Tarı', payload=
'/Dr3'), Button(title='Uzm. Dr. Erol Kalender', payload='/Dr4')
]
elif loc == 'ENT diseases':
buttons = [Button(title='Prof. Dr. Ali Altuntaş', payload=
'/Dr1'), Button(title='Prof. Dr. Serdar Karahatay', payload
='/Dr2'), Button(title='Yar. Doç Dr. Canset Aydın', payload
='/Dr3')]
elif loc == 'nephrology':
buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1')]
elif loc == 'neurology':
buttons = [Button(title='Prof. Dr. Mehmet Zülküf Önal', payload
='/Dr1'), Button(title='Yar. Doç. Dr. Akçay Övünç Ozon',
payload='/Dr2')]
elif loc == 'orthopedics and traumatology':
buttons = [Button(title='Yar. Doç. Dr. Uğur Gönç', payload=
'/Dr1'), Button(title='Op. Dr. Mesut Atabek', payload=
'/Dr2'), Button(title='Prof. Dr. levent Çelebi', payload=
'/Dr3')]
elif loc == 'plastic surgery':
buttons = [Button(title='Op. Dr. Ergin Işık', payload='/Dr1'),
Button(title='Op. Dr. Serdar Düzgün', payload='/Dr2')]
elif loc == 'psychiatry':
buttons = [Button(title='Prof. Dr. Ali Bozkurt', payload='/Dr1')]
elif loc == 'psychologist':
buttons = [Button(title='Psk. Ezgi Kılınç', payload='/Dr1')]
elif loc == 'rheumatology':
buttons = [Button(title='Doç. Dr. Orhan Küçükşahin', payload=
'/Dr1')]
elif loc == 'medical oncology':
buttons = [Button(title='Prof. Dr. Fikret Arpacı', payload=
'/Dr1'), Button(title='Doç. Dr. Gökhan Erdem', payload='/Dr2')]
elif loc == 'urology':
response = 'Müsait doktor bulunmamaktadır...'
response = ''
dispatcher.utter_button_message('my message', buttons)
return [SlotSet('doctor', response)]
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_core.actions.action import Action
from rasa_core.events import SlotSet
from rasa_core.dispatcher import Button, Element, Dispatcher
import json
import pickle
class ActionWeather(Action):
def name(self):
return 'action_doctor'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('department')
if loc == 'algology':
buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1')]
elif loc == 'brain and neurosurgery':
buttons = [Button(title='Doç. Dr. Gülşah Bademci', payload=
'/btn1'), Button(title='Doç. Dr. Suat CANBAY', payload='/btn2')
]
elif loc == 'child hematology':
buttons = [Button(title='Prof. Dr. Hatice Emel Özyürek',
payload='/btn1')]
elif loc == 'child nephrology':
buttons = [Button(title='Prof. Dr. Süleyman Kalman', payload=
'/btn1')]
elif loc == 'child health and illness':
buttons = [Button(title='Prof. Dr. Musa Kazım Çağlar', payload=
'/btn1'), Button(title='Prof. Dr. Süleyman Kalman', payload
='/btn2'), Button(title='Prof. Dr. Hatice Emel Özyürek',
payload='/btn3'), Button(title=
'Yar. Doç. Dr. Pakize Elif Alkışn', payload='/btn4'),
Button(title='Uzm. Dr. Mustafa Yücel Kızıltan', payload=
'/btn5'), Button(title='Uzm. Dr. Gökalp Başbozkurt',
payload='/btn6'), Button(title='Uzm. Dr. Hafsa Uçur',
payload='/btn7'), Button(title='Uzm. Dr. Hüsniye Altan',
payload='/btn8'), Button(title='Uzm. Dr. Sarkhan Elbayıyev',
payload='/btn9'), Button(title='Uzm. Dr. Shahın Guliyev',
payload='/btn10')]
elif loc == 'dermatology':
buttons = [Button(title='Uzm. Dr. Aylin Gözübüyükoğulları',
payload='/Dr1'), Button(title='Uzm. Dr. Yeşim Akpınar Kara',
payload='/Dr2')]
elif loc == 'diet policlinic':
buttons = [Button(title='Uzm. Dyt. Gaye Başkurt', payload=
'/Dr1'), Button(title='Dyt. Deniz Özdemir', payload='/Dr2'),
Button(title='Dyt. Halime Besler', payload='/Dr3')]
elif loc == 'endocrinology':
buttons = [Button(title='Prof. Dr. Serdar Güler', payload='/Dr1')]
elif loc == 'infectious diseases':
buttons = [Button(title='Uzm. Dr. Mine Işık Arıgün', payload=
'/Dr1')]
elif loc == 'physical therapy and rehabilitation':
buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1'),
Button(title='Uzm. Dr. Beril Özturan', payload='/Dr2')]
elif loc == 'gastroenterology':
buttons = [Button(title='Doç. Dr. Reskan Altun', payload='/Dr1'
), Button(title='Doç. Dr. Yasemin Özderin Özin', payload=
'/Dr2')]
elif loc == 'general surgery':
buttons = [Button(title='Prof. Dr. Mehmet Mahir Özmen', payload
='/Dr1'), Button(title='Yar. Doç. Dr. Cem Emir Güldoğan',
payload='/Dr2'), Button(title='Yar. Doç. Dr. Emre Gündoğdu',
payload='/Dr3')]
elif loc == 'chest diseases':
buttons = [Button(title='Prof. Dr. Uğur Gönüllü', payload='/Dr1')]
elif loc == 'eye diseases':
buttons = [Button(title='Op. Dr. Samim Özdeş', payload='/Dr1')]
elif loc == 'hematology policlinic':
buttons = [Button(title='Prof. Dr. Oral Nevruz', payload='/Dr1')]
elif loc == 'internal diseases':
buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1'),
Button(title='Uzm. Dr. Sercan Cansaran', payload='/Dr2'),
Button(title='Uzm. Dr. Sevgi Karabuğa', payload='/Dr3'),
Button(title='Yar. Doç. Dr. Gökhan Celbek', payload='/Dr4')]
elif loc == 'gynecology and Obstetrics':
buttons = [Button(title='Yar. Doç. Dr. Müberra Namlı Kalem',
payload='/Dr1'), Button(title='Yar. Doç. Dr. Coşkun Şimşir',
payload='/Dr2'), Button(title='Prof. Dr. Ali Ergün',
payload='/Dr3'), Button(title='Doç. Dr. Korhan Kahraman',
payload='/Dr4'), Button(title='Doç. Dr. Turgut Var',
payload='/Dr5'), Button(title=
'Doç. Dr. Türkan Örnek Gülpınar', payload='/Dr6'), Button(
title='Op. Dr. Aslı Yücetürk', payload='/Dr7'), Button(
title='Op. Dr. Ebru Yüce', payload='/Dr8'), Button(title=
'Prof. Dr. Timur Gürgan', payload='/Dr9')]
elif loc == 'cardiac surgery':
buttons = [Button(title='Prof. Dr. Erol Şener', payload='/Dr1'),
Button(title='Yar. Doç. Dr. Emre Boysan', payload='/Dr2'),
Button(title='Yar. Doç. Renda Cırcı', payload='/Dr3')]
elif loc == 'cardiology':
buttons = [Button(title='Prof. Dr. Erdoğan İlkay', payload=
'/Dr1'), Button(title='Doç. Dr. Alper Canbay', payload=
'/Dr2'), Button(title='Uzm. Dr. Çiğdem Koca Tarı', payload=
'/Dr3'), Button(title='Uzm. Dr. Erol Kalender', payload='/Dr4')
]
elif loc == 'ENT diseases':
buttons = [Button(title='Prof. Dr. Ali Altuntaş', payload=
'/Dr1'), Button(title='Prof. Dr. Serdar Karahatay', payload
='/Dr2'), Button(title='Yar. Doç Dr. Canset Aydın', payload
='/Dr3')]
elif loc == 'nephrology':
buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1')]
elif loc == 'neurology':
buttons = [Button(title='Prof. Dr. Mehmet Zülküf Önal', payload
='/Dr1'), Button(title='Yar. Doç. Dr. Akçay Övünç Ozon',
payload='/Dr2')]
elif loc == 'orthopedics and traumatology':
buttons = [Button(title='Yar. Doç. Dr. Uğur Gönç', payload=
'/Dr1'), Button(title='Op. Dr. Mesut Atabek', payload=
'/Dr2'), Button(title='Prof. Dr. levent Çelebi', payload=
'/Dr3')]
elif loc == 'plastic surgery':
buttons = [Button(title='Op. Dr. Ergin Işık', payload='/Dr1'),
Button(title='Op. Dr. Serdar Düzgün', payload='/Dr2')]
elif loc == 'psychiatry':
buttons = [Button(title='Prof. Dr. Ali Bozkurt', payload='/Dr1')]
elif loc == 'psychologist':
buttons = [Button(title='Psk. Ezgi Kılınç', payload='/Dr1')]
elif loc == 'rheumatology':
buttons = [Button(title='Doç. Dr. Orhan Küçükşahin', payload=
'/Dr1')]
elif loc == 'medical oncology':
buttons = [Button(title='Prof. Dr. Fikret Arpacı', payload=
'/Dr1'), Button(title='Doç. Dr. Gökhan Erdem', payload='/Dr2')]
elif loc == 'urology':
response = 'Müsait doktor bulunmamaktadır...'
response = ''
dispatcher.utter_button_message('my message', buttons)
return [SlotSet('doctor', response)]
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_core.actions.action import Action
from rasa_core.events import SlotSet
from rasa_core.dispatcher import Button, Element, Dispatcher
import json
import pickle
class ActionWeather(Action):
def name(self):
return 'action_doctor'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('department')
#response = tracker.current_slot_values()
# response = '#' + json.dumps(aaa) + '#'
if loc == 'algology':
#response = "Prof. Dr. Öznur Öken"
buttons = [
Button(title="Prof. Dr. Öznur Öken", payload="/Dr1")
]
elif loc == 'brain and neurosurgery':
#response = "1- Doç. Dr. Gülşah Bademci\n2- Doç. Dr. Suat CANBAY"
buttons = [
Button(title="Doç. Dr. Gülşah Bademci", payload="/btn1"),
Button(title="Doç. Dr. Suat CANBAY", payload="/btn2")
]
elif loc == 'child hematology':
#response = "Prof. Dr. Hatice Emel Özyürek"
buttons = [
Button(title="Prof. Dr. Hatice Emel Özyürek", payload="/btn1")
]
elif loc == 'child nephrology':
#response = "Prof. Dr. Süleyman Kalman"
buttons = [
Button(title="Prof. Dr. Süleyman Kalman", payload="/btn1")
]
elif loc == 'child health and illness':
#response = "1- Prof. Dr. Musa Kazım Çağlar\n2- Prof. Dr. Süleyman Kalman\n3- Prof. Dr. Hatice Emel Özyürek\n4- Yar. Doç. Dr. Pakize Elif Alkış\n5- Uzm. Dr. Mustafa Yücel Kızıltan\n6- Uzm. Dr. Gökalp Başbozkurt\n7- Uzm. Dr. Hafsa Uçur\n8- Uzm. Dr. Hüsniye Altan\n 9- Uzm. Dr. Sarkhan Elbayıyev\n 10- Uzm. Dr. Shahın Guliyev"
buttons = [
Button(title="Prof. Dr. Musa Kazım Çağlar", payload="/btn1"),
Button(title="Prof. Dr. Süleyman Kalman", payload="/btn2"),
Button(title="Prof. Dr. Hatice Emel Özyürek", payload="/btn3"),
Button(title="Yar. Doç. Dr. Pakize Elif Alkışn", payload="/btn4"),
Button(title="Uzm. Dr. Mustafa Yücel Kızıltan", payload="/btn5"),
Button(title="Uzm. Dr. Gökalp Başbozkurt", payload="/btn6"),
Button(title="Uzm. Dr. Hafsa Uçur", payload="/btn7"),
Button(title="Uzm. Dr. Hüsniye Altan", payload="/btn8"),
Button(title="Uzm. Dr. Sarkhan Elbayıyev", payload="/btn9"),
Button(title="Uzm. Dr. Shahın Guliyev", payload="/btn10")
]
elif loc == 'dermatology':
#response = "1- Uzm. Dr. Aylin Gözübüyükoğulları\n2- Uzm. Dr. Yeşim Akpınar Kara"
buttons = [
Button(title="Uzm. Dr. Aylin Gözübüyükoğulları", payload="/Dr1"),
Button(title="Uzm. Dr. Yeşim Akpınar Kara", payload="/Dr2")
]
elif loc == 'diet policlinic':
#response = "1- Uzm. Dyt. Gaye Başkurt\n2- Dyt. Deniz Özdemir\n3- Dyt. Halime Besler"
buttons = [
Button(title="Uzm. Dyt. Gaye Başkurt", payload="/Dr1"),
Button(title="Dyt. Deniz Özdemir", payload="/Dr2"),
Button(title="Dyt. Halime Besler", payload="/Dr3")
]
elif loc == 'endocrinology':
#response = "Prof. Dr. Serdar Güler"
buttons = [
Button(title="Prof. Dr. Serdar Güler", payload="/Dr1")
]
elif loc == 'infectious diseases':
#response = "Uzm. Dr. Mine Işık Arıgün"
buttons = [
Button(title="Uzm. Dr. Mine Işık Arıgün", payload="/Dr1")
]
elif loc == 'physical therapy and rehabilitation':
#response = "1- Prof. Dr. Öznur Öken\n2- Uzm. Dr. Beril Özturan"
buttons = [
Button(title="Prof. Dr. Öznur Öken", payload="/Dr1"),
Button(title="Uzm. Dr. Beril Özturan", payload="/Dr2")
]
elif loc == 'gastroenterology':
#response = "1- Doç. Dr. Reskan Altun\n2- Doç. Dr. Yasemin Özderin Özin"
buttons = [
Button(title="Doç. Dr. Reskan Altun", payload="/Dr1"),
Button(title="Doç. Dr. Yasemin Özderin Özin", payload="/Dr2")
]
elif loc == 'general surgery':
#response = "1- Prof. Dr. Mehmet Mahir Özmen\n2- Yar. Doç. Dr. Cem Emir Güldoğan\n3- Yar. Doç. Dr. Emre Gündoğdu"
buttons = [
Button(title="Prof. Dr. Mehmet Mahir Özmen", payload="/Dr1"),
Button(title="Yar. Doç. Dr. Cem Emir Güldoğan", payload="/Dr2"),
Button(title="Yar. Doç. Dr. Emre Gündoğdu", payload="/Dr3")
]
elif loc == 'chest diseases':
#response = "Prof. Dr. Uğur Gönüllü"
buttons = [
Button(title="Prof. Dr. Uğur Gönüllü", payload="/Dr1")
]
elif loc == 'eye diseases':
#response = "Op. Dr. Samim Özdeş"
buttons = [
Button(title="Op. Dr. Samim Özdeş", payload="/Dr1")
]
elif loc == 'hematology policlinic':
#response = "Prof. Dr. Oral Nevruz"
buttons = [
Button(title="Prof. Dr. Oral Nevruz", payload="/Dr1")
]
elif loc == 'internal diseases':
#response = "1- Doç. Dr. Beril Akman\n2- Uzm. Dr. Sercan Cansaran\n3- Uzm. Dr. Sevgi Karabuğa\n4- Yar. Doç. Dr. Gökhan Celbek"
buttons = [
Button(title="Doç. Dr. Beril Akman", payload="/Dr1"),
Button(title="Uzm. Dr. Sercan Cansaran", payload="/Dr2"),
Button(title="Uzm. Dr. Sevgi Karabuğa", payload="/Dr3"),
Button(title="Yar. Doç. Dr. Gökhan Celbek", payload="/Dr4")
]
elif loc == 'gynecology and Obstetrics':
#response = "1- Yar. Doç. Dr. Müberra Namlı Kalem\n2- Yar. Doç. Dr. Coşkun Şimşir\n3- Prof. Dr. Ali Ergün\n4- Doç. Dr. Korhan Kahraman\n5- Doç. Dr. Turgut Var\n6- Doç. Dr. Türkan Örnek Gülpınar\n7- Op. Dr. Aslı Yücetürk\n8- Op. Dr. Ebru Yüce\n9- Prof. Dr. Timur Gürgan"
buttons = [
Button(title="Yar. Doç. Dr. Müberra Namlı Kalem", payload="/Dr1"),
Button(title="Yar. Doç. Dr. Coşkun Şimşir", payload="/Dr2"),
Button(title="Prof. Dr. Ali Ergün", payload="/Dr3"),
Button(title="Doç. Dr. Korhan Kahraman", payload="/Dr4"),
Button(title="Doç. Dr. Turgut Var", payload="/Dr5"),
Button(title="Doç. Dr. Türkan Örnek Gülpınar", payload="/Dr6"),
Button(title="Op. Dr. Aslı Yücetürk", payload="/Dr7"),
Button(title="Op. Dr. Ebru Yüce", payload="/Dr8"),
Button(title="Prof. Dr. Timur Gürgan", payload="/Dr9")
]
elif loc == 'cardiac surgery':
#response = "1- Prof. Dr. Erol Şener\n2- Yar. Doç. Dr. Emre Boysan\n2- Yar. Doç. Renda Cırcı"
buttons = [
Button(title="Prof. Dr. Erol Şener", payload="/Dr1"),
Button(title="Yar. Doç. Dr. Emre Boysan", payload="/Dr2"),
Button(title="Yar. Doç. Renda Cırcı", payload="/Dr3")
]
elif loc == 'cardiology':
#response = "1- Prof. Dr. Erdoğan İlkay\n2- Doç. Dr. Alper Canbay\n3- Uzm. Dr. Çiğdem Koca Tarı\n4- Uzm. Dr. Erol Kalender"
buttons = [
Button(title="Prof. Dr. Erdoğan İlkay", payload="/Dr1"),
Button(title="Doç. Dr. Alper Canbay", payload="/Dr2"),
Button(title="Uzm. Dr. Çiğdem Koca Tarı", payload="/Dr3"),
Button(title="Uzm. Dr. Erol Kalender", payload="/Dr4")
]
elif loc == 'ENT diseases':
#response = "1- Prof. Dr. Ali Altuntaş\n2- Prof. Dr. Serdar Karahatay\n3- Yar. Doç Dr. Canset Aydın"
buttons = [
Button(title="Prof. Dr. Ali Altuntaş", payload="/Dr1"),
Button(title="Prof. Dr. Serdar Karahatay", payload="/Dr2"),
Button(title="Yar. Doç Dr. Canset Aydın", payload="/Dr3")
]
elif loc == 'nephrology':
#response = "Doç. Dr. Beril Akman"
buttons = [
Button(title="Doç. Dr. Beril Akman", payload="/Dr1")
]
elif loc == 'neurology':
#response = "1- Prof. Dr. Mehmet Zülküf Önal\n2- Yar. Doç. Dr. Akçay Övünç Ozon"
buttons = [
Button(title="Prof. Dr. Mehmet Zülküf Önal", payload="/Dr1"),
Button(title="Yar. Doç. Dr. Akçay Övünç Ozon", payload="/Dr2")
]
elif loc == 'orthopedics and traumatology':
#response = "1- Yar. Doç. Dr. Uğur Gönç\n2- Op. Dr. Mesut Atabek\n3- Prof. Dr. levent Çelebi"
buttons = [
Button(title="Yar. Doç. Dr. Uğur Gönç", payload="/Dr1"),
Button(title="Op. Dr. Mesut Atabek", payload="/Dr2"),
Button(title="Prof. Dr. levent Çelebi", payload="/Dr3")
]
elif loc == 'plastic surgery':
#response = "1- Op. Dr. Ergin Işık\n2- Op. Dr. Serdar Düzgün"
buttons = [
Button(title="Op. Dr. Ergin Işık", payload="/Dr1"),
Button(title="Op. Dr. Serdar Düzgün", payload="/Dr2")
]
elif loc == 'psychiatry':
#response = "Prof. Dr. Ali Bozkurt"
buttons = [
Button(title="Prof. Dr. Ali Bozkurt", payload="/Dr1")
]
elif loc == 'psychologist':
#response = "Psk. Ezgi Kılınç"
buttons = [
Button(title="Psk. Ezgi Kılınç", payload="/Dr1")
]
elif loc == 'rheumatology':
#response = "Doç. Dr. Orhan Küçükşahin"
buttons = [
Button(title="Doç. Dr. Orhan Küçükşahin", payload="/Dr1")
]
elif loc == 'medical oncology':
#response = ["Prof. Dr. Fikret Arpacı", "Doç. Dr. Gökhan Erdem"]
buttons = [
Button(title="Prof. Dr. Fikret Arpacı", payload="/Dr1"),
Button(title="Doç. Dr. Gökhan Erdem", payload="/Dr2")
]
elif loc == 'urology':
response = "Müsait doktor bulunmamaktadır..."
#response = "abc\n\nasd"
response=""
# buttons = [
# Button(title="Btn1", payload="/btn1"),
# Button(title="Btn2", payload="/btn2")
# ]
dispatcher.utter_button_message("my message", buttons)
return [SlotSet('doctor', response)]
|
flexible
|
{
"blob_id": "f87d08f3bb6faa237cce8379de3aaaa3270a4a34",
"index": 3854,
"step-1": "<mask token>\n\n\nclass ActionWeather(Action):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ActionWeather(Action):\n <mask token>\n\n def run(self, dispatcher, tracker, domain):\n loc = tracker.get_slot('department')\n if loc == 'algology':\n buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1')]\n elif loc == 'brain and neurosurgery':\n buttons = [Button(title='Doç. Dr. Gülşah Bademci', payload=\n '/btn1'), Button(title='Doç. Dr. Suat CANBAY', payload='/btn2')\n ]\n elif loc == 'child hematology':\n buttons = [Button(title='Prof. Dr. Hatice Emel Özyürek',\n payload='/btn1')]\n elif loc == 'child nephrology':\n buttons = [Button(title='Prof. Dr. Süleyman Kalman', payload=\n '/btn1')]\n elif loc == 'child health and illness':\n buttons = [Button(title='Prof. Dr. Musa Kazım Çağlar', payload=\n '/btn1'), Button(title='Prof. Dr. Süleyman Kalman', payload\n ='/btn2'), Button(title='Prof. Dr. Hatice Emel Özyürek',\n payload='/btn3'), Button(title=\n 'Yar. Doç. Dr. Pakize Elif Alkışn', payload='/btn4'),\n Button(title='Uzm. Dr. Mustafa Yücel Kızıltan', payload=\n '/btn5'), Button(title='Uzm. Dr. Gökalp Başbozkurt',\n payload='/btn6'), Button(title='Uzm. Dr. Hafsa Uçur',\n payload='/btn7'), Button(title='Uzm. Dr. Hüsniye Altan',\n payload='/btn8'), Button(title='Uzm. Dr. Sarkhan Elbayıyev',\n payload='/btn9'), Button(title='Uzm. Dr. Shahın Guliyev',\n payload='/btn10')]\n elif loc == 'dermatology':\n buttons = [Button(title='Uzm. Dr. Aylin Gözübüyükoğulları',\n payload='/Dr1'), Button(title='Uzm. Dr. Yeşim Akpınar Kara',\n payload='/Dr2')]\n elif loc == 'diet policlinic':\n buttons = [Button(title='Uzm. Dyt. Gaye Başkurt', payload=\n '/Dr1'), Button(title='Dyt. Deniz Özdemir', payload='/Dr2'),\n Button(title='Dyt. Halime Besler', payload='/Dr3')]\n elif loc == 'endocrinology':\n buttons = [Button(title='Prof. Dr. Serdar Güler', payload='/Dr1')]\n elif loc == 'infectious diseases':\n buttons = [Button(title='Uzm. Dr. Mine Işık Arıgün', payload=\n '/Dr1')]\n elif loc == 'physical therapy and rehabilitation':\n buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1'),\n Button(title='Uzm. Dr. Beril Özturan', payload='/Dr2')]\n elif loc == 'gastroenterology':\n buttons = [Button(title='Doç. Dr. Reskan Altun', payload='/Dr1'\n ), Button(title='Doç. Dr. Yasemin Özderin Özin', payload=\n '/Dr2')]\n elif loc == 'general surgery':\n buttons = [Button(title='Prof. Dr. Mehmet Mahir Özmen', payload\n ='/Dr1'), Button(title='Yar. Doç. Dr. Cem Emir Güldoğan',\n payload='/Dr2'), Button(title='Yar. Doç. Dr. Emre Gündoğdu',\n payload='/Dr3')]\n elif loc == 'chest diseases':\n buttons = [Button(title='Prof. Dr. Uğur Gönüllü', payload='/Dr1')]\n elif loc == 'eye diseases':\n buttons = [Button(title='Op. Dr. Samim Özdeş', payload='/Dr1')]\n elif loc == 'hematology policlinic':\n buttons = [Button(title='Prof. Dr. Oral Nevruz', payload='/Dr1')]\n elif loc == 'internal diseases':\n buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1'),\n Button(title='Uzm. Dr. Sercan Cansaran', payload='/Dr2'),\n Button(title='Uzm. Dr. Sevgi Karabuğa', payload='/Dr3'),\n Button(title='Yar. Doç. Dr. Gökhan Celbek', payload='/Dr4')]\n elif loc == 'gynecology and Obstetrics':\n buttons = [Button(title='Yar. Doç. Dr. Müberra Namlı Kalem',\n payload='/Dr1'), Button(title='Yar. Doç. Dr. Coşkun Şimşir',\n payload='/Dr2'), Button(title='Prof. Dr. Ali Ergün',\n payload='/Dr3'), Button(title='Doç. Dr. Korhan Kahraman',\n payload='/Dr4'), Button(title='Doç. Dr. Turgut Var',\n payload='/Dr5'), Button(title=\n 'Doç. Dr. 
Türkan Örnek Gülpınar', payload='/Dr6'), Button(\n title='Op. Dr. Aslı Yücetürk', payload='/Dr7'), Button(\n title='Op. Dr. Ebru Yüce', payload='/Dr8'), Button(title=\n 'Prof. Dr. Timur Gürgan', payload='/Dr9')]\n elif loc == 'cardiac surgery':\n buttons = [Button(title='Prof. Dr. Erol Şener', payload='/Dr1'),\n Button(title='Yar. Doç. Dr. Emre Boysan', payload='/Dr2'),\n Button(title='Yar. Doç. Renda Cırcı', payload='/Dr3')]\n elif loc == 'cardiology':\n buttons = [Button(title='Prof. Dr. Erdoğan İlkay', payload=\n '/Dr1'), Button(title='Doç. Dr. Alper Canbay', payload=\n '/Dr2'), Button(title='Uzm. Dr. Çiğdem Koca Tarı', payload=\n '/Dr3'), Button(title='Uzm. Dr. Erol Kalender', payload='/Dr4')\n ]\n elif loc == 'ENT diseases':\n buttons = [Button(title='Prof. Dr. Ali Altuntaş', payload=\n '/Dr1'), Button(title='Prof. Dr. Serdar Karahatay', payload\n ='/Dr2'), Button(title='Yar. Doç Dr. Canset Aydın', payload\n ='/Dr3')]\n elif loc == 'nephrology':\n buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1')]\n elif loc == 'neurology':\n buttons = [Button(title='Prof. Dr. Mehmet Zülküf Önal', payload\n ='/Dr1'), Button(title='Yar. Doç. Dr. Akçay Övünç Ozon',\n payload='/Dr2')]\n elif loc == 'orthopedics and traumatology':\n buttons = [Button(title='Yar. Doç. Dr. Uğur Gönç', payload=\n '/Dr1'), Button(title='Op. Dr. Mesut Atabek', payload=\n '/Dr2'), Button(title='Prof. Dr. levent Çelebi', payload=\n '/Dr3')]\n elif loc == 'plastic surgery':\n buttons = [Button(title='Op. Dr. Ergin Işık', payload='/Dr1'),\n Button(title='Op. Dr. Serdar Düzgün', payload='/Dr2')]\n elif loc == 'psychiatry':\n buttons = [Button(title='Prof. Dr. Ali Bozkurt', payload='/Dr1')]\n elif loc == 'psychologist':\n buttons = [Button(title='Psk. Ezgi Kılınç', payload='/Dr1')]\n elif loc == 'rheumatology':\n buttons = [Button(title='Doç. Dr. Orhan Küçükşahin', payload=\n '/Dr1')]\n elif loc == 'medical oncology':\n buttons = [Button(title='Prof. Dr. Fikret Arpacı', payload=\n '/Dr1'), Button(title='Doç. Dr. Gökhan Erdem', payload='/Dr2')]\n elif loc == 'urology':\n response = 'Müsait doktor bulunmamaktadır...'\n response = ''\n dispatcher.utter_button_message('my message', buttons)\n return [SlotSet('doctor', response)]\n",
"step-3": "<mask token>\n\n\nclass ActionWeather(Action):\n\n def name(self):\n return 'action_doctor'\n\n def run(self, dispatcher, tracker, domain):\n loc = tracker.get_slot('department')\n if loc == 'algology':\n buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1')]\n elif loc == 'brain and neurosurgery':\n buttons = [Button(title='Doç. Dr. Gülşah Bademci', payload=\n '/btn1'), Button(title='Doç. Dr. Suat CANBAY', payload='/btn2')\n ]\n elif loc == 'child hematology':\n buttons = [Button(title='Prof. Dr. Hatice Emel Özyürek',\n payload='/btn1')]\n elif loc == 'child nephrology':\n buttons = [Button(title='Prof. Dr. Süleyman Kalman', payload=\n '/btn1')]\n elif loc == 'child health and illness':\n buttons = [Button(title='Prof. Dr. Musa Kazım Çağlar', payload=\n '/btn1'), Button(title='Prof. Dr. Süleyman Kalman', payload\n ='/btn2'), Button(title='Prof. Dr. Hatice Emel Özyürek',\n payload='/btn3'), Button(title=\n 'Yar. Doç. Dr. Pakize Elif Alkışn', payload='/btn4'),\n Button(title='Uzm. Dr. Mustafa Yücel Kızıltan', payload=\n '/btn5'), Button(title='Uzm. Dr. Gökalp Başbozkurt',\n payload='/btn6'), Button(title='Uzm. Dr. Hafsa Uçur',\n payload='/btn7'), Button(title='Uzm. Dr. Hüsniye Altan',\n payload='/btn8'), Button(title='Uzm. Dr. Sarkhan Elbayıyev',\n payload='/btn9'), Button(title='Uzm. Dr. Shahın Guliyev',\n payload='/btn10')]\n elif loc == 'dermatology':\n buttons = [Button(title='Uzm. Dr. Aylin Gözübüyükoğulları',\n payload='/Dr1'), Button(title='Uzm. Dr. Yeşim Akpınar Kara',\n payload='/Dr2')]\n elif loc == 'diet policlinic':\n buttons = [Button(title='Uzm. Dyt. Gaye Başkurt', payload=\n '/Dr1'), Button(title='Dyt. Deniz Özdemir', payload='/Dr2'),\n Button(title='Dyt. Halime Besler', payload='/Dr3')]\n elif loc == 'endocrinology':\n buttons = [Button(title='Prof. Dr. Serdar Güler', payload='/Dr1')]\n elif loc == 'infectious diseases':\n buttons = [Button(title='Uzm. Dr. Mine Işık Arıgün', payload=\n '/Dr1')]\n elif loc == 'physical therapy and rehabilitation':\n buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1'),\n Button(title='Uzm. Dr. Beril Özturan', payload='/Dr2')]\n elif loc == 'gastroenterology':\n buttons = [Button(title='Doç. Dr. Reskan Altun', payload='/Dr1'\n ), Button(title='Doç. Dr. Yasemin Özderin Özin', payload=\n '/Dr2')]\n elif loc == 'general surgery':\n buttons = [Button(title='Prof. Dr. Mehmet Mahir Özmen', payload\n ='/Dr1'), Button(title='Yar. Doç. Dr. Cem Emir Güldoğan',\n payload='/Dr2'), Button(title='Yar. Doç. Dr. Emre Gündoğdu',\n payload='/Dr3')]\n elif loc == 'chest diseases':\n buttons = [Button(title='Prof. Dr. Uğur Gönüllü', payload='/Dr1')]\n elif loc == 'eye diseases':\n buttons = [Button(title='Op. Dr. Samim Özdeş', payload='/Dr1')]\n elif loc == 'hematology policlinic':\n buttons = [Button(title='Prof. Dr. Oral Nevruz', payload='/Dr1')]\n elif loc == 'internal diseases':\n buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1'),\n Button(title='Uzm. Dr. Sercan Cansaran', payload='/Dr2'),\n Button(title='Uzm. Dr. Sevgi Karabuğa', payload='/Dr3'),\n Button(title='Yar. Doç. Dr. Gökhan Celbek', payload='/Dr4')]\n elif loc == 'gynecology and Obstetrics':\n buttons = [Button(title='Yar. Doç. Dr. Müberra Namlı Kalem',\n payload='/Dr1'), Button(title='Yar. Doç. Dr. Coşkun Şimşir',\n payload='/Dr2'), Button(title='Prof. Dr. Ali Ergün',\n payload='/Dr3'), Button(title='Doç. Dr. Korhan Kahraman',\n payload='/Dr4'), Button(title='Doç. Dr. Turgut Var',\n payload='/Dr5'), Button(title=\n 'Doç. Dr. 
Türkan Örnek Gülpınar', payload='/Dr6'), Button(\n title='Op. Dr. Aslı Yücetürk', payload='/Dr7'), Button(\n title='Op. Dr. Ebru Yüce', payload='/Dr8'), Button(title=\n 'Prof. Dr. Timur Gürgan', payload='/Dr9')]\n elif loc == 'cardiac surgery':\n buttons = [Button(title='Prof. Dr. Erol Şener', payload='/Dr1'),\n Button(title='Yar. Doç. Dr. Emre Boysan', payload='/Dr2'),\n Button(title='Yar. Doç. Renda Cırcı', payload='/Dr3')]\n elif loc == 'cardiology':\n buttons = [Button(title='Prof. Dr. Erdoğan İlkay', payload=\n '/Dr1'), Button(title='Doç. Dr. Alper Canbay', payload=\n '/Dr2'), Button(title='Uzm. Dr. Çiğdem Koca Tarı', payload=\n '/Dr3'), Button(title='Uzm. Dr. Erol Kalender', payload='/Dr4')\n ]\n elif loc == 'ENT diseases':\n buttons = [Button(title='Prof. Dr. Ali Altuntaş', payload=\n '/Dr1'), Button(title='Prof. Dr. Serdar Karahatay', payload\n ='/Dr2'), Button(title='Yar. Doç Dr. Canset Aydın', payload\n ='/Dr3')]\n elif loc == 'nephrology':\n buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1')]\n elif loc == 'neurology':\n buttons = [Button(title='Prof. Dr. Mehmet Zülküf Önal', payload\n ='/Dr1'), Button(title='Yar. Doç. Dr. Akçay Övünç Ozon',\n payload='/Dr2')]\n elif loc == 'orthopedics and traumatology':\n buttons = [Button(title='Yar. Doç. Dr. Uğur Gönç', payload=\n '/Dr1'), Button(title='Op. Dr. Mesut Atabek', payload=\n '/Dr2'), Button(title='Prof. Dr. levent Çelebi', payload=\n '/Dr3')]\n elif loc == 'plastic surgery':\n buttons = [Button(title='Op. Dr. Ergin Işık', payload='/Dr1'),\n Button(title='Op. Dr. Serdar Düzgün', payload='/Dr2')]\n elif loc == 'psychiatry':\n buttons = [Button(title='Prof. Dr. Ali Bozkurt', payload='/Dr1')]\n elif loc == 'psychologist':\n buttons = [Button(title='Psk. Ezgi Kılınç', payload='/Dr1')]\n elif loc == 'rheumatology':\n buttons = [Button(title='Doç. Dr. Orhan Küçükşahin', payload=\n '/Dr1')]\n elif loc == 'medical oncology':\n buttons = [Button(title='Prof. Dr. Fikret Arpacı', payload=\n '/Dr1'), Button(title='Doç. Dr. Gökhan Erdem', payload='/Dr2')]\n elif loc == 'urology':\n response = 'Müsait doktor bulunmamaktadır...'\n response = ''\n dispatcher.utter_button_message('my message', buttons)\n return [SlotSet('doctor', response)]\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom rasa_core.actions.action import Action\nfrom rasa_core.events import SlotSet\nfrom rasa_core.dispatcher import Button, Element, Dispatcher\nimport json\nimport pickle\n\n\nclass ActionWeather(Action):\n\n def name(self):\n return 'action_doctor'\n\n def run(self, dispatcher, tracker, domain):\n loc = tracker.get_slot('department')\n if loc == 'algology':\n buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1')]\n elif loc == 'brain and neurosurgery':\n buttons = [Button(title='Doç. Dr. Gülşah Bademci', payload=\n '/btn1'), Button(title='Doç. Dr. Suat CANBAY', payload='/btn2')\n ]\n elif loc == 'child hematology':\n buttons = [Button(title='Prof. Dr. Hatice Emel Özyürek',\n payload='/btn1')]\n elif loc == 'child nephrology':\n buttons = [Button(title='Prof. Dr. Süleyman Kalman', payload=\n '/btn1')]\n elif loc == 'child health and illness':\n buttons = [Button(title='Prof. Dr. Musa Kazım Çağlar', payload=\n '/btn1'), Button(title='Prof. Dr. Süleyman Kalman', payload\n ='/btn2'), Button(title='Prof. Dr. Hatice Emel Özyürek',\n payload='/btn3'), Button(title=\n 'Yar. Doç. Dr. Pakize Elif Alkışn', payload='/btn4'),\n Button(title='Uzm. Dr. Mustafa Yücel Kızıltan', payload=\n '/btn5'), Button(title='Uzm. Dr. Gökalp Başbozkurt',\n payload='/btn6'), Button(title='Uzm. Dr. Hafsa Uçur',\n payload='/btn7'), Button(title='Uzm. Dr. Hüsniye Altan',\n payload='/btn8'), Button(title='Uzm. Dr. Sarkhan Elbayıyev',\n payload='/btn9'), Button(title='Uzm. Dr. Shahın Guliyev',\n payload='/btn10')]\n elif loc == 'dermatology':\n buttons = [Button(title='Uzm. Dr. Aylin Gözübüyükoğulları',\n payload='/Dr1'), Button(title='Uzm. Dr. Yeşim Akpınar Kara',\n payload='/Dr2')]\n elif loc == 'diet policlinic':\n buttons = [Button(title='Uzm. Dyt. Gaye Başkurt', payload=\n '/Dr1'), Button(title='Dyt. Deniz Özdemir', payload='/Dr2'),\n Button(title='Dyt. Halime Besler', payload='/Dr3')]\n elif loc == 'endocrinology':\n buttons = [Button(title='Prof. Dr. Serdar Güler', payload='/Dr1')]\n elif loc == 'infectious diseases':\n buttons = [Button(title='Uzm. Dr. Mine Işık Arıgün', payload=\n '/Dr1')]\n elif loc == 'physical therapy and rehabilitation':\n buttons = [Button(title='Prof. Dr. Öznur Öken', payload='/Dr1'),\n Button(title='Uzm. Dr. Beril Özturan', payload='/Dr2')]\n elif loc == 'gastroenterology':\n buttons = [Button(title='Doç. Dr. Reskan Altun', payload='/Dr1'\n ), Button(title='Doç. Dr. Yasemin Özderin Özin', payload=\n '/Dr2')]\n elif loc == 'general surgery':\n buttons = [Button(title='Prof. Dr. Mehmet Mahir Özmen', payload\n ='/Dr1'), Button(title='Yar. Doç. Dr. Cem Emir Güldoğan',\n payload='/Dr2'), Button(title='Yar. Doç. Dr. Emre Gündoğdu',\n payload='/Dr3')]\n elif loc == 'chest diseases':\n buttons = [Button(title='Prof. Dr. Uğur Gönüllü', payload='/Dr1')]\n elif loc == 'eye diseases':\n buttons = [Button(title='Op. Dr. Samim Özdeş', payload='/Dr1')]\n elif loc == 'hematology policlinic':\n buttons = [Button(title='Prof. Dr. Oral Nevruz', payload='/Dr1')]\n elif loc == 'internal diseases':\n buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1'),\n Button(title='Uzm. Dr. Sercan Cansaran', payload='/Dr2'),\n Button(title='Uzm. Dr. Sevgi Karabuğa', payload='/Dr3'),\n Button(title='Yar. Doç. Dr. Gökhan Celbek', payload='/Dr4')]\n elif loc == 'gynecology and Obstetrics':\n buttons = [Button(title='Yar. Doç. Dr. 
Müberra Namlı Kalem',\n payload='/Dr1'), Button(title='Yar. Doç. Dr. Coşkun Şimşir',\n payload='/Dr2'), Button(title='Prof. Dr. Ali Ergün',\n payload='/Dr3'), Button(title='Doç. Dr. Korhan Kahraman',\n payload='/Dr4'), Button(title='Doç. Dr. Turgut Var',\n payload='/Dr5'), Button(title=\n 'Doç. Dr. Türkan Örnek Gülpınar', payload='/Dr6'), Button(\n title='Op. Dr. Aslı Yücetürk', payload='/Dr7'), Button(\n title='Op. Dr. Ebru Yüce', payload='/Dr8'), Button(title=\n 'Prof. Dr. Timur Gürgan', payload='/Dr9')]\n elif loc == 'cardiac surgery':\n buttons = [Button(title='Prof. Dr. Erol Şener', payload='/Dr1'),\n Button(title='Yar. Doç. Dr. Emre Boysan', payload='/Dr2'),\n Button(title='Yar. Doç. Renda Cırcı', payload='/Dr3')]\n elif loc == 'cardiology':\n buttons = [Button(title='Prof. Dr. Erdoğan İlkay', payload=\n '/Dr1'), Button(title='Doç. Dr. Alper Canbay', payload=\n '/Dr2'), Button(title='Uzm. Dr. Çiğdem Koca Tarı', payload=\n '/Dr3'), Button(title='Uzm. Dr. Erol Kalender', payload='/Dr4')\n ]\n elif loc == 'ENT diseases':\n buttons = [Button(title='Prof. Dr. Ali Altuntaş', payload=\n '/Dr1'), Button(title='Prof. Dr. Serdar Karahatay', payload\n ='/Dr2'), Button(title='Yar. Doç Dr. Canset Aydın', payload\n ='/Dr3')]\n elif loc == 'nephrology':\n buttons = [Button(title='Doç. Dr. Beril Akman', payload='/Dr1')]\n elif loc == 'neurology':\n buttons = [Button(title='Prof. Dr. Mehmet Zülküf Önal', payload\n ='/Dr1'), Button(title='Yar. Doç. Dr. Akçay Övünç Ozon',\n payload='/Dr2')]\n elif loc == 'orthopedics and traumatology':\n buttons = [Button(title='Yar. Doç. Dr. Uğur Gönç', payload=\n '/Dr1'), Button(title='Op. Dr. Mesut Atabek', payload=\n '/Dr2'), Button(title='Prof. Dr. levent Çelebi', payload=\n '/Dr3')]\n elif loc == 'plastic surgery':\n buttons = [Button(title='Op. Dr. Ergin Işık', payload='/Dr1'),\n Button(title='Op. Dr. Serdar Düzgün', payload='/Dr2')]\n elif loc == 'psychiatry':\n buttons = [Button(title='Prof. Dr. Ali Bozkurt', payload='/Dr1')]\n elif loc == 'psychologist':\n buttons = [Button(title='Psk. Ezgi Kılınç', payload='/Dr1')]\n elif loc == 'rheumatology':\n buttons = [Button(title='Doç. Dr. Orhan Küçükşahin', payload=\n '/Dr1')]\n elif loc == 'medical oncology':\n buttons = [Button(title='Prof. Dr. Fikret Arpacı', payload=\n '/Dr1'), Button(title='Doç. Dr. Gökhan Erdem', payload='/Dr2')]\n elif loc == 'urology':\n response = 'Müsait doktor bulunmamaktadır...'\n response = ''\n dispatcher.utter_button_message('my message', buttons)\n return [SlotSet('doctor', response)]\n",
"step-5": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom rasa_core.actions.action import Action\nfrom rasa_core.events import SlotSet\nfrom rasa_core.dispatcher import Button, Element, Dispatcher\nimport json\nimport pickle\n\nclass ActionWeather(Action):\n def name(self):\n return 'action_doctor'\n\n def run(self, dispatcher, tracker, domain):\n\n loc = tracker.get_slot('department')\n #response = tracker.current_slot_values()\n # response = '#' + json.dumps(aaa) + '#'\n\n if loc == 'algology':\n #response = \"Prof. Dr. Öznur Öken\"\n buttons = [\n Button(title=\"Prof. Dr. Öznur Öken\", payload=\"/Dr1\")\n ]\n\n elif loc == 'brain and neurosurgery':\n #response = \"1- Doç. Dr. Gülşah Bademci\\n2- Doç. Dr. Suat CANBAY\"\n buttons = [\n Button(title=\"Doç. Dr. Gülşah Bademci\", payload=\"/btn1\"),\n Button(title=\"Doç. Dr. Suat CANBAY\", payload=\"/btn2\")\n ]\n\n elif loc == 'child hematology':\n #response = \"Prof. Dr. Hatice Emel Özyürek\"\n buttons = [\n Button(title=\"Prof. Dr. Hatice Emel Özyürek\", payload=\"/btn1\")\n ]\n\n elif loc == 'child nephrology':\n #response = \"Prof. Dr. Süleyman Kalman\"\n buttons = [\n Button(title=\"Prof. Dr. Süleyman Kalman\", payload=\"/btn1\")\n ]\n\n elif loc == 'child health and illness':\n #response = \"1- Prof. Dr. Musa Kazım Çağlar\\n2- Prof. Dr. Süleyman Kalman\\n3- Prof. Dr. Hatice Emel Özyürek\\n4- Yar. Doç. Dr. Pakize Elif Alkış\\n5- Uzm. Dr. Mustafa Yücel Kızıltan\\n6- Uzm. Dr. Gökalp Başbozkurt\\n7- Uzm. Dr. Hafsa Uçur\\n8- Uzm. Dr. Hüsniye Altan\\n 9- Uzm. Dr. Sarkhan Elbayıyev\\n 10- Uzm. Dr. Shahın Guliyev\"\n buttons = [\n Button(title=\"Prof. Dr. Musa Kazım Çağlar\", payload=\"/btn1\"),\n Button(title=\"Prof. Dr. Süleyman Kalman\", payload=\"/btn2\"),\n Button(title=\"Prof. Dr. Hatice Emel Özyürek\", payload=\"/btn3\"),\n Button(title=\"Yar. Doç. Dr. Pakize Elif Alkışn\", payload=\"/btn4\"),\n Button(title=\"Uzm. Dr. Mustafa Yücel Kızıltan\", payload=\"/btn5\"),\n Button(title=\"Uzm. Dr. Gökalp Başbozkurt\", payload=\"/btn6\"),\n Button(title=\"Uzm. Dr. Hafsa Uçur\", payload=\"/btn7\"),\n Button(title=\"Uzm. Dr. Hüsniye Altan\", payload=\"/btn8\"),\n Button(title=\"Uzm. Dr. Sarkhan Elbayıyev\", payload=\"/btn9\"),\n Button(title=\"Uzm. Dr. Shahın Guliyev\", payload=\"/btn10\")\n ]\n elif loc == 'dermatology':\n #response = \"1- Uzm. Dr. Aylin Gözübüyükoğulları\\n2- Uzm. Dr. Yeşim Akpınar Kara\"\n buttons = [\n Button(title=\"Uzm. Dr. Aylin Gözübüyükoğulları\", payload=\"/Dr1\"),\n Button(title=\"Uzm. Dr. Yeşim Akpınar Kara\", payload=\"/Dr2\")\n ]\n elif loc == 'diet policlinic':\n #response = \"1- Uzm. Dyt. Gaye Başkurt\\n2- Dyt. Deniz Özdemir\\n3- Dyt. Halime Besler\"\n buttons = [\n Button(title=\"Uzm. Dyt. Gaye Başkurt\", payload=\"/Dr1\"),\n Button(title=\"Dyt. Deniz Özdemir\", payload=\"/Dr2\"),\n Button(title=\"Dyt. Halime Besler\", payload=\"/Dr3\")\n ]\n\n elif loc == 'endocrinology':\n #response = \"Prof. Dr. Serdar Güler\"\n buttons = [\n Button(title=\"Prof. Dr. Serdar Güler\", payload=\"/Dr1\")\n ]\n\n elif loc == 'infectious diseases':\n #response = \"Uzm. Dr. Mine Işık Arıgün\"\n buttons = [\n Button(title=\"Uzm. Dr. Mine Işık Arıgün\", payload=\"/Dr1\")\n ]\n\n elif loc == 'physical therapy and rehabilitation':\n #response = \"1- Prof. Dr. Öznur Öken\\n2- Uzm. Dr. Beril Özturan\"\n buttons = [\n Button(title=\"Prof. Dr. Öznur Öken\", payload=\"/Dr1\"),\n Button(title=\"Uzm. Dr. 
Beril Özturan\", payload=\"/Dr2\")\n ]\n\n elif loc == 'gastroenterology':\n #response = \"1- Doç. Dr. Reskan Altun\\n2- Doç. Dr. Yasemin Özderin Özin\"\n buttons = [\n Button(title=\"Doç. Dr. Reskan Altun\", payload=\"/Dr1\"),\n Button(title=\"Doç. Dr. Yasemin Özderin Özin\", payload=\"/Dr2\")\n ]\n\n elif loc == 'general surgery':\n #response = \"1- Prof. Dr. Mehmet Mahir Özmen\\n2- Yar. Doç. Dr. Cem Emir Güldoğan\\n3- Yar. Doç. Dr. Emre Gündoğdu\"\n buttons = [\n Button(title=\"Prof. Dr. Mehmet Mahir Özmen\", payload=\"/Dr1\"),\n Button(title=\"Yar. Doç. Dr. Cem Emir Güldoğan\", payload=\"/Dr2\"),\n Button(title=\"Yar. Doç. Dr. Emre Gündoğdu\", payload=\"/Dr3\")\n ]\n\n elif loc == 'chest diseases':\n #response = \"Prof. Dr. Uğur Gönüllü\"\n buttons = [\n Button(title=\"Prof. Dr. Uğur Gönüllü\", payload=\"/Dr1\")\n ]\n\n\n elif loc == 'eye diseases':\n #response = \"Op. Dr. Samim Özdeş\"\n buttons = [\n Button(title=\"Op. Dr. Samim Özdeş\", payload=\"/Dr1\")\n ]\n\n elif loc == 'hematology policlinic':\n #response = \"Prof. Dr. Oral Nevruz\"\n buttons = [\n Button(title=\"Prof. Dr. Oral Nevruz\", payload=\"/Dr1\")\n ]\n\n elif loc == 'internal diseases':\n #response = \"1- Doç. Dr. Beril Akman\\n2- Uzm. Dr. Sercan Cansaran\\n3- Uzm. Dr. Sevgi Karabuğa\\n4- Yar. Doç. Dr. Gökhan Celbek\"\n buttons = [\n Button(title=\"Doç. Dr. Beril Akman\", payload=\"/Dr1\"),\n Button(title=\"Uzm. Dr. Sercan Cansaran\", payload=\"/Dr2\"),\n Button(title=\"Uzm. Dr. Sevgi Karabuğa\", payload=\"/Dr3\"),\n Button(title=\"Yar. Doç. Dr. Gökhan Celbek\", payload=\"/Dr4\")\n ]\n\n elif loc == 'gynecology and Obstetrics':\n #response = \"1- Yar. Doç. Dr. Müberra Namlı Kalem\\n2- Yar. Doç. Dr. Coşkun Şimşir\\n3- Prof. Dr. Ali Ergün\\n4- Doç. Dr. Korhan Kahraman\\n5- Doç. Dr. Turgut Var\\n6- Doç. Dr. Türkan Örnek Gülpınar\\n7- Op. Dr. Aslı Yücetürk\\n8- Op. Dr. Ebru Yüce\\n9- Prof. Dr. Timur Gürgan\"\n buttons = [\n Button(title=\"Yar. Doç. Dr. Müberra Namlı Kalem\", payload=\"/Dr1\"),\n Button(title=\"Yar. Doç. Dr. Coşkun Şimşir\", payload=\"/Dr2\"),\n Button(title=\"Prof. Dr. Ali Ergün\", payload=\"/Dr3\"),\n Button(title=\"Doç. Dr. Korhan Kahraman\", payload=\"/Dr4\"),\n Button(title=\"Doç. Dr. Turgut Var\", payload=\"/Dr5\"),\n Button(title=\"Doç. Dr. Türkan Örnek Gülpınar\", payload=\"/Dr6\"),\n Button(title=\"Op. Dr. Aslı Yücetürk\", payload=\"/Dr7\"),\n Button(title=\"Op. Dr. Ebru Yüce\", payload=\"/Dr8\"),\n Button(title=\"Prof. Dr. Timur Gürgan\", payload=\"/Dr9\")\n ]\n\n elif loc == 'cardiac surgery':\n #response = \"1- Prof. Dr. Erol Şener\\n2- Yar. Doç. Dr. Emre Boysan\\n2- Yar. Doç. Renda Cırcı\"\n buttons = [\n Button(title=\"Prof. Dr. Erol Şener\", payload=\"/Dr1\"),\n Button(title=\"Yar. Doç. Dr. Emre Boysan\", payload=\"/Dr2\"),\n Button(title=\"Yar. Doç. Renda Cırcı\", payload=\"/Dr3\")\n ]\n\n elif loc == 'cardiology':\n #response = \"1- Prof. Dr. Erdoğan İlkay\\n2- Doç. Dr. Alper Canbay\\n3- Uzm. Dr. Çiğdem Koca Tarı\\n4- Uzm. Dr. Erol Kalender\"\n buttons = [\n Button(title=\"Prof. Dr. Erdoğan İlkay\", payload=\"/Dr1\"),\n Button(title=\"Doç. Dr. Alper Canbay\", payload=\"/Dr2\"),\n Button(title=\"Uzm. Dr. Çiğdem Koca Tarı\", payload=\"/Dr3\"),\n Button(title=\"Uzm. Dr. Erol Kalender\", payload=\"/Dr4\")\n ]\n\n elif loc == 'ENT diseases':\n #response = \"1- Prof. Dr. Ali Altuntaş\\n2- Prof. Dr. Serdar Karahatay\\n3- Yar. Doç Dr. Canset Aydın\"\n buttons = [\n Button(title=\"Prof. Dr. Ali Altuntaş\", payload=\"/Dr1\"),\n Button(title=\"Prof. Dr. 
Serdar Karahatay\", payload=\"/Dr2\"),\n Button(title=\"Yar. Doç Dr. Canset Aydın\", payload=\"/Dr3\")\n ]\n\n elif loc == 'nephrology':\n #response = \"Doç. Dr. Beril Akman\"\n buttons = [\n Button(title=\"Doç. Dr. Beril Akman\", payload=\"/Dr1\")\n ]\n\n elif loc == 'neurology':\n #response = \"1- Prof. Dr. Mehmet Zülküf Önal\\n2- Yar. Doç. Dr. Akçay Övünç Ozon\"\n buttons = [\n Button(title=\"Prof. Dr. Mehmet Zülküf Önal\", payload=\"/Dr1\"),\n Button(title=\"Yar. Doç. Dr. Akçay Övünç Ozon\", payload=\"/Dr2\")\n ]\n\n elif loc == 'orthopedics and traumatology':\n #response = \"1- Yar. Doç. Dr. Uğur Gönç\\n2- Op. Dr. Mesut Atabek\\n3- Prof. Dr. levent Çelebi\"\n buttons = [\n Button(title=\"Yar. Doç. Dr. Uğur Gönç\", payload=\"/Dr1\"),\n Button(title=\"Op. Dr. Mesut Atabek\", payload=\"/Dr2\"),\n Button(title=\"Prof. Dr. levent Çelebi\", payload=\"/Dr3\")\n\n ]\n\n elif loc == 'plastic surgery':\n #response = \"1- Op. Dr. Ergin Işık\\n2- Op. Dr. Serdar Düzgün\"\n buttons = [\n Button(title=\"Op. Dr. Ergin Işık\", payload=\"/Dr1\"),\n Button(title=\"Op. Dr. Serdar Düzgün\", payload=\"/Dr2\")\n\n ]\n\n elif loc == 'psychiatry':\n #response = \"Prof. Dr. Ali Bozkurt\"\n buttons = [\n Button(title=\"Prof. Dr. Ali Bozkurt\", payload=\"/Dr1\")\n\n ]\n\n elif loc == 'psychologist':\n #response = \"Psk. Ezgi Kılınç\"\n buttons = [\n Button(title=\"Psk. Ezgi Kılınç\", payload=\"/Dr1\")\n\n ]\n\n elif loc == 'rheumatology':\n #response = \"Doç. Dr. Orhan Küçükşahin\"\n buttons = [\n Button(title=\"Doç. Dr. Orhan Küçükşahin\", payload=\"/Dr1\")\n\n ]\n\n\n elif loc == 'medical oncology':\n #response = [\"Prof. Dr. Fikret Arpacı\", \"Doç. Dr. Gökhan Erdem\"]\n buttons = [\n Button(title=\"Prof. Dr. Fikret Arpacı\", payload=\"/Dr1\"),\n Button(title=\"Doç. Dr. Gökhan Erdem\", payload=\"/Dr2\")\n\n ]\n\n elif loc == 'urology':\n response = \"Müsait doktor bulunmamaktadır...\"\n\n #response = \"abc\\n\\nasd\"\n\n response=\"\"\n # buttons = [\n # Button(title=\"Btn1\", payload=\"/btn1\"),\n # Button(title=\"Btn2\", payload=\"/btn2\")\n # ]\n dispatcher.utter_button_message(\"my message\", buttons)\n return [SlotSet('doctor', response)]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(5):
score = int(input())
if score < 40:
score = 40
result += score
print(result // 5)
<|reserved_special_token_1|>
result = 0
for i in range(5):
score = int(input())
if score < 40:
score = 40
result += score
print(result // 5)
<|reserved_special_token_1|>
# 백준 문제(2021.5.22)
# 10039번) 상현이가 가르치는 아이폰 앱 개발 수업의 수강생은 원섭, 세희, 상근, 숭, 강수이다.
# 어제 이 수업의 기말고사가 있었고, 상현이는 지금 학생들의 기말고사 시험지를 채점하고 있다.
# 기말고사 점수가 40점 이상인 학생들은 그 점수 그대로 자신의 성적이 된다.
# 하지만, 40점 미만인 학생들은 보충학습을 듣는 조건을 수락하면 40점을 받게 된다.
# 보충학습은 거부할 수 없기 때문에, 40점 미만인 학생들은 항상 40점을 받게 된다.
# 학생 5명의 점수가 주어졌을 때, 평균 점수를 구하는 프로그램을 작성하시오.
result = 0
for i in range(5) :
score = int(input())
if(score < 40) :
score = 40
result += score
print(result//5)
|
flexible
|
{
"blob_id": "4a13a0d7aa2371d7c8963a01b7cc1b93f4110d5e",
"index": 5356,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(5):\n score = int(input())\n if score < 40:\n score = 40\n result += score\nprint(result // 5)\n",
"step-3": "result = 0\nfor i in range(5):\n score = int(input())\n if score < 40:\n score = 40\n result += score\nprint(result // 5)\n",
"step-4": "# 백준 문제(2021.5.22)\n# 10039번) 상현이가 가르치는 아이폰 앱 개발 수업의 수강생은 원섭, 세희, 상근, 숭, 강수이다.\n# 어제 이 수업의 기말고사가 있었고, 상현이는 지금 학생들의 기말고사 시험지를 채점하고 있다. \n# 기말고사 점수가 40점 이상인 학생들은 그 점수 그대로 자신의 성적이 된다. \n# 하지만, 40점 미만인 학생들은 보충학습을 듣는 조건을 수락하면 40점을 받게 된다. \n# 보충학습은 거부할 수 없기 때문에, 40점 미만인 학생들은 항상 40점을 받게 된다.\n# 학생 5명의 점수가 주어졌을 때, 평균 점수를 구하는 프로그램을 작성하시오.\n\nresult = 0\n\nfor i in range(5) :\n score = int(input())\n if(score < 40) :\n score = 40\n result += score\nprint(result//5)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def check(request):
if not request.user.is_authenticated:
return redirect('/auth/login/')
else:
return redirect('/worker/')
def loginpg(request):
return render(request, 'registration/login.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check(request):
if not request.user.is_authenticated:
return redirect('/auth/login/')
else:
return redirect('/worker/')
def loginpg(request):
return render(request, 'registration/login.html')
<|reserved_special_token_0|>
def auth(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/worker/')
else:
return render(request, 'registration/login_error.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check(request):
if not request.user.is_authenticated:
return redirect('/auth/login/')
else:
return redirect('/worker/')
def loginpg(request):
return render(request, 'registration/login.html')
def logoutpg(request):
logout(request)
return render(request, 'registration/logout.html')
def auth(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/worker/')
else:
return render(request, 'registration/login_error.html')
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
def check(request):
if not request.user.is_authenticated:
return redirect('/auth/login/')
else:
return redirect('/worker/')
def loginpg(request):
return render(request, 'registration/login.html')
def logoutpg(request):
logout(request)
return render(request, 'registration/logout.html')
def auth(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/worker/')
else:
return render(request, 'registration/login_error.html')
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
# Create your views here.
def check(request):
if not request.user.is_authenticated:
return redirect('/auth/login/')
else:
return redirect('/worker/')
def loginpg(request):
return render(request, 'registration/login.html')
def logoutpg(request):
logout(request)
return render(request, 'registration/logout.html')
def auth(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/worker/')
else:
return render(request, 'registration/login_error.html')
|
flexible
|
{
"blob_id": "fc2afc99dc754b58c36bc76c723727337851cc3e",
"index": 5326,
"step-1": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\n<mask token>\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-3": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request) \n return render(request, 'registration/logout.html')\n\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
__all__ = ['language']
from StringTemplate import *
|
normal
|
{
"blob_id": "e70c25ce1d61437aacfe7fad0a51e096e1ce4f5d",
"index": 5212,
"step-1": "<mask token>\n",
"step-2": "__all__ = ['language']\n<mask token>\n",
"step-3": "__all__ = ['language']\nfrom StringTemplate import *\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def generar_numero_aleatorio():
return random.randint(1, 100)
def es_el_numero(resp_usuario, resp_correc):
return resp_usuario == resp_correc
def numero_dado_es_mayor(resp_usuario, resp_correc):
return resp_usuario > resp_correc
<|reserved_special_token_0|>
def el_numero_es_mayor():
print('El numero que diste es mayor al correcto, intenta de nuevo!')
def el_numero_es_menor():
print('El numero que diste es menor al correcto, intenta de nuevo!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generar_numero_aleatorio():
return random.randint(1, 100)
def es_el_numero(resp_usuario, resp_correc):
return resp_usuario == resp_correc
def numero_dado_es_mayor(resp_usuario, resp_correc):
return resp_usuario > resp_correc
def juego_terminado(numero_correcto, numero_intentos):
print('El juego ha terminado!')
print('El numero correcto era', numero_correcto, 'y lo resolviste en',
numero_intentos, 'intentos.', sep=' ')
def el_numero_es_mayor():
print('El numero que diste es mayor al correcto, intenta de nuevo!')
def el_numero_es_menor():
print('El numero que diste es menor al correcto, intenta de nuevo!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generar_numero_aleatorio():
return random.randint(1, 100)
def es_el_numero(resp_usuario, resp_correc):
return resp_usuario == resp_correc
def numero_dado_es_mayor(resp_usuario, resp_correc):
return resp_usuario > resp_correc
def juego_terminado(numero_correcto, numero_intentos):
print('El juego ha terminado!')
print('El numero correcto era', numero_correcto, 'y lo resolviste en',
numero_intentos, 'intentos.', sep=' ')
def el_numero_es_mayor():
print('El numero que diste es mayor al correcto, intenta de nuevo!')
def el_numero_es_menor():
print('El numero que diste es menor al correcto, intenta de nuevo!')
def iniciar_juego():
gano = False
intentos = 1
numero = 0
respuesta_correc = generar_numero_aleatorio()
while not gano:
numero = int(input('Ingresa un numero: '))
if es_el_numero(numero, respuesta_correc):
juego_terminado(respuesta_correc, intentos)
gano = True
else:
if numero_dado_es_mayor(numero, respuesta_correc):
el_numero_es_mayor()
else:
el_numero_es_menor()
intentos += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
def generar_numero_aleatorio():
return random.randint(1, 100)
def es_el_numero(resp_usuario, resp_correc):
return resp_usuario == resp_correc
def numero_dado_es_mayor(resp_usuario, resp_correc):
return resp_usuario > resp_correc
def juego_terminado(numero_correcto, numero_intentos):
print('El juego ha terminado!')
print('El numero correcto era', numero_correcto, 'y lo resolviste en',
numero_intentos, 'intentos.', sep=' ')
def el_numero_es_mayor():
print('El numero que diste es mayor al correcto, intenta de nuevo!')
def el_numero_es_menor():
print('El numero que diste es menor al correcto, intenta de nuevo!')
def iniciar_juego():
gano = False
intentos = 1
numero = 0
respuesta_correc = generar_numero_aleatorio()
while not gano:
numero = int(input('Ingresa un numero: '))
if es_el_numero(numero, respuesta_correc):
juego_terminado(respuesta_correc, intentos)
gano = True
else:
if numero_dado_es_mayor(numero, respuesta_correc):
el_numero_es_mayor()
else:
el_numero_es_menor()
intentos += 1
iniciar_juego()
<|reserved_special_token_1|>
'''
Confeccionar un programa que genere un número aleatorio entre 1 y 100 y no se muestre.
El operador debe tratar de adivinar el número ingresado.
Cada vez que ingrese un número mostrar un mensaje "Gano" si es igual al generado o "El número aleatorio el mayor" o "El número aleatorio es menor".
Mostrar cuando gana el jugador cuantos intentos necesitó.
'''
import random
def generar_numero_aleatorio():
return random.randint(1,100)
def es_el_numero(resp_usuario,resp_correc):
return resp_usuario == resp_correc
def numero_dado_es_mayor(resp_usuario,resp_correc):
return resp_usuario > resp_correc
def juego_terminado(numero_correcto,numero_intentos):
print("El juego ha terminado!")
print("El numero correcto era",numero_correcto,"y lo resolviste en",numero_intentos,"intentos.",sep=" ")
def el_numero_es_mayor():
print("El numero que diste es mayor al correcto, intenta de nuevo!")
def el_numero_es_menor():
print("El numero que diste es menor al correcto, intenta de nuevo!")
def iniciar_juego():
gano = False
intentos = 1
numero = 0
respuesta_correc = generar_numero_aleatorio()
while (not gano):
numero = int(input("Ingresa un numero: "))
if (es_el_numero(numero,respuesta_correc)):
juego_terminado(respuesta_correc,intentos)
gano = True
else:
if (numero_dado_es_mayor(numero,respuesta_correc)):
el_numero_es_mayor()
else:
el_numero_es_menor()
intentos += 1
iniciar_juego()
|
flexible
|
{
"blob_id": "8498ba69e4cc5c5f480644ac20d878fb2a632bee",
"index": 5128,
"step-1": "<mask token>\n\n\ndef generar_numero_aleatorio():\n return random.randint(1, 100)\n\n\ndef es_el_numero(resp_usuario, resp_correc):\n return resp_usuario == resp_correc\n\n\ndef numero_dado_es_mayor(resp_usuario, resp_correc):\n return resp_usuario > resp_correc\n\n\n<mask token>\n\n\ndef el_numero_es_mayor():\n print('El numero que diste es mayor al correcto, intenta de nuevo!')\n\n\ndef el_numero_es_menor():\n print('El numero que diste es menor al correcto, intenta de nuevo!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generar_numero_aleatorio():\n return random.randint(1, 100)\n\n\ndef es_el_numero(resp_usuario, resp_correc):\n return resp_usuario == resp_correc\n\n\ndef numero_dado_es_mayor(resp_usuario, resp_correc):\n return resp_usuario > resp_correc\n\n\ndef juego_terminado(numero_correcto, numero_intentos):\n print('El juego ha terminado!')\n print('El numero correcto era', numero_correcto, 'y lo resolviste en',\n numero_intentos, 'intentos.', sep=' ')\n\n\ndef el_numero_es_mayor():\n print('El numero que diste es mayor al correcto, intenta de nuevo!')\n\n\ndef el_numero_es_menor():\n print('El numero que diste es menor al correcto, intenta de nuevo!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generar_numero_aleatorio():\n return random.randint(1, 100)\n\n\ndef es_el_numero(resp_usuario, resp_correc):\n return resp_usuario == resp_correc\n\n\ndef numero_dado_es_mayor(resp_usuario, resp_correc):\n return resp_usuario > resp_correc\n\n\ndef juego_terminado(numero_correcto, numero_intentos):\n print('El juego ha terminado!')\n print('El numero correcto era', numero_correcto, 'y lo resolviste en',\n numero_intentos, 'intentos.', sep=' ')\n\n\ndef el_numero_es_mayor():\n print('El numero que diste es mayor al correcto, intenta de nuevo!')\n\n\ndef el_numero_es_menor():\n print('El numero que diste es menor al correcto, intenta de nuevo!')\n\n\ndef iniciar_juego():\n gano = False\n intentos = 1\n numero = 0\n respuesta_correc = generar_numero_aleatorio()\n while not gano:\n numero = int(input('Ingresa un numero: '))\n if es_el_numero(numero, respuesta_correc):\n juego_terminado(respuesta_correc, intentos)\n gano = True\n else:\n if numero_dado_es_mayor(numero, respuesta_correc):\n el_numero_es_mayor()\n else:\n el_numero_es_menor()\n intentos += 1\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport random\n\n\ndef generar_numero_aleatorio():\n return random.randint(1, 100)\n\n\ndef es_el_numero(resp_usuario, resp_correc):\n return resp_usuario == resp_correc\n\n\ndef numero_dado_es_mayor(resp_usuario, resp_correc):\n return resp_usuario > resp_correc\n\n\ndef juego_terminado(numero_correcto, numero_intentos):\n print('El juego ha terminado!')\n print('El numero correcto era', numero_correcto, 'y lo resolviste en',\n numero_intentos, 'intentos.', sep=' ')\n\n\ndef el_numero_es_mayor():\n print('El numero que diste es mayor al correcto, intenta de nuevo!')\n\n\ndef el_numero_es_menor():\n print('El numero que diste es menor al correcto, intenta de nuevo!')\n\n\ndef iniciar_juego():\n gano = False\n intentos = 1\n numero = 0\n respuesta_correc = generar_numero_aleatorio()\n while not gano:\n numero = int(input('Ingresa un numero: '))\n if es_el_numero(numero, respuesta_correc):\n juego_terminado(respuesta_correc, intentos)\n gano = True\n else:\n if numero_dado_es_mayor(numero, respuesta_correc):\n el_numero_es_mayor()\n else:\n el_numero_es_menor()\n intentos += 1\n\n\niniciar_juego()\n",
"step-5": "'''\nConfeccionar un programa que genere un número aleatorio entre 1 y 100 y no se muestre.\nEl operador debe tratar de adivinar el número ingresado.\nCada vez que ingrese un número mostrar un mensaje \"Gano\" si es igual al generado o \"El número aleatorio el mayor\" o \"El número aleatorio es menor\".\nMostrar cuando gana el jugador cuantos intentos necesitó.\n'''\nimport random\n\ndef generar_numero_aleatorio():\n return random.randint(1,100)\n\ndef es_el_numero(resp_usuario,resp_correc):\n return resp_usuario == resp_correc\n\ndef numero_dado_es_mayor(resp_usuario,resp_correc):\n return resp_usuario > resp_correc\n\ndef juego_terminado(numero_correcto,numero_intentos):\n print(\"El juego ha terminado!\")\n print(\"El numero correcto era\",numero_correcto,\"y lo resolviste en\",numero_intentos,\"intentos.\",sep=\" \")\n\ndef el_numero_es_mayor():\n print(\"El numero que diste es mayor al correcto, intenta de nuevo!\")\n\n\ndef el_numero_es_menor():\n print(\"El numero que diste es menor al correcto, intenta de nuevo!\")\n\ndef iniciar_juego():\n gano = False\n \n intentos = 1\n\n numero = 0\n\n respuesta_correc = generar_numero_aleatorio()\n \n while (not gano):\n numero = int(input(\"Ingresa un numero: \"))\n if (es_el_numero(numero,respuesta_correc)):\n juego_terminado(respuesta_correc,intentos)\n gano = True\n else:\n if (numero_dado_es_mayor(numero,respuesta_correc)):\n el_numero_es_mayor()\n else:\n el_numero_es_menor()\n \n intentos += 1\n\n\n\n\niniciar_juego()\n\n\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
"""
Given the root of a binary tree, check whether it is a mirror of itself (i.e., symmetric around its center).
Example 1:
Input: root = [1, 2, 2, 3, 4, 4, 3]
Output: true
1
/ \
2 2
/ \ / \
3 4 4 3
Example 2:
Input: root = [1, 2, 2, None, 3, None, 3]
Output: false
1
/ \
2 2
\ \
3 3
"""
"""
We recursively check whether opposite ends of the tree are equal, going down the tree.
The logic is very similar to problem 100.
"""
from shared import list_to_tree
def is_symmetric(root):
def helper(left, right):
if left is None and right is None:
return True
elif left and right:
return helper(left.left, right.right) and left.val == right.val and helper(left.right, right.left)
else:
return False
return helper(root.left, root.right)
assert is_symmetric(list_to_tree([1, 2, 2, 3, 4, 4, 3])) is True
assert is_symmetric(list_to_tree([1, 2, 2, None, 3, None, 3])) is False
assert is_symmetric(list_to_tree([1, 2, 2, None, 2, None])) is False
assert is_symmetric(list_to_tree([1, 2, 3])) is False
|
normal
|
{
"blob_id": "9cfbb06df4bc286ff56983d6e843b33e4da6ccf8",
"index": 7803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_symmetric(root):\n\n def helper(left, right):\n if left is None and right is None:\n return True\n elif left and right:\n return helper(left.left, right.right\n ) and left.val == right.val and helper(left.right, right.left)\n else:\n return False\n return helper(root.left, root.right)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_symmetric(root):\n\n def helper(left, right):\n if left is None and right is None:\n return True\n elif left and right:\n return helper(left.left, right.right\n ) and left.val == right.val and helper(left.right, right.left)\n else:\n return False\n return helper(root.left, root.right)\n\n\nassert is_symmetric(list_to_tree([1, 2, 2, 3, 4, 4, 3])) is True\nassert is_symmetric(list_to_tree([1, 2, 2, None, 3, None, 3])) is False\nassert is_symmetric(list_to_tree([1, 2, 2, None, 2, None])) is False\nassert is_symmetric(list_to_tree([1, 2, 3])) is False\n",
"step-4": "<mask token>\nfrom shared import list_to_tree\n\n\ndef is_symmetric(root):\n\n def helper(left, right):\n if left is None and right is None:\n return True\n elif left and right:\n return helper(left.left, right.right\n ) and left.val == right.val and helper(left.right, right.left)\n else:\n return False\n return helper(root.left, root.right)\n\n\nassert is_symmetric(list_to_tree([1, 2, 2, 3, 4, 4, 3])) is True\nassert is_symmetric(list_to_tree([1, 2, 2, None, 3, None, 3])) is False\nassert is_symmetric(list_to_tree([1, 2, 2, None, 2, None])) is False\nassert is_symmetric(list_to_tree([1, 2, 3])) is False\n",
"step-5": "\"\"\"\nGiven the root of a binary tree, check whether it is a mirror of itself (i.e., symmetric around its center).\n\nExample 1:\n\nInput: root = [1, 2, 2, 3, 4, 4, 3]\nOutput: true\n\n 1\n / \\\n 2 2\n / \\ / \\\n 3 4 4 3\n\nExample 2:\nInput: root = [1, 2, 2, None, 3, None, 3]\nOutput: false\n\n 1\n / \\\n 2 2\n \\ \\\n 3 3\n\"\"\"\n\n\"\"\"\nWe recursively check whether opposite ends of the tree are equal, going down the tree. \nThe logic is very similar to problem 100.\n\"\"\"\n\nfrom shared import list_to_tree\n\n\ndef is_symmetric(root):\n def helper(left, right):\n if left is None and right is None:\n return True\n elif left and right:\n return helper(left.left, right.right) and left.val == right.val and helper(left.right, right.left)\n else:\n return False\n\n return helper(root.left, root.right)\n\n\nassert is_symmetric(list_to_tree([1, 2, 2, 3, 4, 4, 3])) is True\nassert is_symmetric(list_to_tree([1, 2, 2, None, 3, None, 3])) is False\nassert is_symmetric(list_to_tree([1, 2, 2, None, 2, None])) is False\nassert is_symmetric(list_to_tree([1, 2, 3])) is False\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i < len(twits):
j = 0
while j < len(twits):
if i != j:
print('%s%s\t%d' % (twits[i] + ' ', twits[j], 1))
j += 1
i += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i < len(twits):
j = 0
while j < len(twits):
if i != j:
print('%s%s\t%d' % (twits[i] + ' ', twits[j], 1))
j += 1
i += 1
<|reserved_special_token_1|>
import sys, re
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i < len(twits):
j = 0
while j < len(twits):
if i != j:
print('%s%s\t%d' % (twits[i] + ' ', twits[j], 1))
j += 1
i += 1
<|reserved_special_token_1|>
#!/usr/bin/env python
import sys, re
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i <len(twits):
j = 0
while j <len(twits):
if i!= j:
print("%s%s\t%d" % (twits[i]+' ', twits[j], 1))
j+=1
i+=1
|
flexible
|
{
"blob_id": "e884825325ceb401142cab0618d9d4e70e475cf5",
"index": 893,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i < len(twits):\n j = 0\n while j < len(twits):\n if i != j:\n print('%s%s\\t%d' % (twits[i] + ' ', twits[j], 1))\n j += 1\n i += 1\n",
"step-3": "<mask token>\nwindow = 2\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i < len(twits):\n j = 0\n while j < len(twits):\n if i != j:\n print('%s%s\\t%d' % (twits[i] + ' ', twits[j], 1))\n j += 1\n i += 1\n",
"step-4": "import sys, re\nwindow = 2\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i < len(twits):\n j = 0\n while j < len(twits):\n if i != j:\n print('%s%s\\t%d' % (twits[i] + ' ', twits[j], 1))\n j += 1\n i += 1\n",
"step-5": "#!/usr/bin/env python\n\nimport sys, re\n\nwindow = 2\n\nfor line in sys.stdin:\n line = line.strip()\n twits = line.split()\n i = 0\n while i <len(twits):\n j = 0\n while j <len(twits):\n if i!= j:\n print(\"%s%s\\t%d\" % (twits[i]+' ', twits[j], 1))\n j+=1\n i+=1",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('projects', '0004_project_is_featured')]
operations = [migrations.AlterField(model_name='project', name='pin_id',
field=models.CharField(max_length=20, null=True, unique=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('projects', '0004_project_is_featured')]
operations = [migrations.AlterField(model_name='project', name='pin_id',
field=models.CharField(max_length=20, null=True, unique=True))]
<|reserved_special_token_1|>
# Generated by Django 3.1.6 on 2021-04-22 07:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0004_project_is_featured'),
]
operations = [
migrations.AlterField(
model_name='project',
name='pin_id',
field=models.CharField(max_length=20, null=True, unique=True),
),
]
|
flexible
|
{
"blob_id": "24ed29dfaaf7ce508b2d80740bad1304b291c596",
"index": 8466,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0004_project_is_featured')]\n operations = [migrations.AlterField(model_name='project', name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0004_project_is_featured')]\n operations = [migrations.AlterField(model_name='project', name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-04-22 07:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0004_project_is_featured'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='pin_id',\n field=models.CharField(max_length=20, null=True, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
<|reserved_special_token_0|>
print(response)
print(response.safe_search_annotation.adult)
for label in response.label_annotations:
print(label.description)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
creds = service_account.Credentials.from_service_account_file('./key.json')
client = vision.ImageAnnotatorClient(credentials=creds)
file_name = os.path.join(os.path.dirname(__file__), './dog.jpg')
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
request = {'image': {'content': content}, 'features': [{'max_results': 2,
'type': 'LABEL_DETECTION'}, {'type': 'SAFE_SEARCH_DETECTION'}]}
response = client.annotate_image(request)
print(response)
print(response.safe_search_annotation.adult)
for label in response.label_annotations:
print(label.description)
<|reserved_special_token_1|>
import io
import os
from google.cloud import vision
from google.oauth2 import service_account
creds = service_account.Credentials.from_service_account_file('./key.json')
client = vision.ImageAnnotatorClient(credentials=creds)
file_name = os.path.join(os.path.dirname(__file__), './dog.jpg')
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
request = {'image': {'content': content}, 'features': [{'max_results': 2,
'type': 'LABEL_DETECTION'}, {'type': 'SAFE_SEARCH_DETECTION'}]}
response = client.annotate_image(request)
print(response)
print(response.safe_search_annotation.adult)
for label in response.label_annotations:
print(label.description)
<|reserved_special_token_1|>
# use local image
import io
import os
from google.cloud import vision
from google.oauth2 import service_account
creds = service_account.Credentials.from_service_account_file('./key.json')
client = vision.ImageAnnotatorClient(
credentials=creds,
)
# The name of the image file to annotate
file_name = os.path.join(
os.path.dirname(__file__),
"./dog.jpg")
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
request = {
"image": {
"content": content
},
"features": [
{
"max_results": 2,
"type": "LABEL_DETECTION"
},
{
"type": "SAFE_SEARCH_DETECTION"
}
]
}
response = client.annotate_image(request)
print(response)
print(response.safe_search_annotation.adult)
for label in response.label_annotations:
print(label.description)
|
flexible
|
{
"blob_id": "800573786913ff2fc37845193b5584a0a815533f",
"index": 8340,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n<mask token>\nprint(response)\nprint(response.safe_search_annotation.adult)\nfor label in response.label_annotations:\n print(label.description)\n",
"step-3": "<mask token>\ncreds = service_account.Credentials.from_service_account_file('./key.json')\nclient = vision.ImageAnnotatorClient(credentials=creds)\nfile_name = os.path.join(os.path.dirname(__file__), './dog.jpg')\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\nrequest = {'image': {'content': content}, 'features': [{'max_results': 2,\n 'type': 'LABEL_DETECTION'}, {'type': 'SAFE_SEARCH_DETECTION'}]}\nresponse = client.annotate_image(request)\nprint(response)\nprint(response.safe_search_annotation.adult)\nfor label in response.label_annotations:\n print(label.description)\n",
"step-4": "import io\nimport os\nfrom google.cloud import vision\nfrom google.oauth2 import service_account\ncreds = service_account.Credentials.from_service_account_file('./key.json')\nclient = vision.ImageAnnotatorClient(credentials=creds)\nfile_name = os.path.join(os.path.dirname(__file__), './dog.jpg')\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\nrequest = {'image': {'content': content}, 'features': [{'max_results': 2,\n 'type': 'LABEL_DETECTION'}, {'type': 'SAFE_SEARCH_DETECTION'}]}\nresponse = client.annotate_image(request)\nprint(response)\nprint(response.safe_search_annotation.adult)\nfor label in response.label_annotations:\n print(label.description)\n",
"step-5": "# use local image\n\nimport io\nimport os\n\nfrom google.cloud import vision\nfrom google.oauth2 import service_account\n\ncreds = service_account.Credentials.from_service_account_file('./key.json')\n\nclient = vision.ImageAnnotatorClient(\n credentials=creds,\n)\n\n# The name of the image file to annotate\nfile_name = os.path.join(\n os.path.dirname(__file__),\n \"./dog.jpg\")\n\n# Loads the image into memory\nwith io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n\nrequest = {\n \"image\": {\n \"content\": content\n }, \n \"features\": [\n {\n \"max_results\": 2,\n \"type\": \"LABEL_DETECTION\"\n },\n {\n \"type\": \"SAFE_SEARCH_DETECTION\"\n }\n ]\n}\n\nresponse = client.annotate_image(request)\n\nprint(response)\n\nprint(response.safe_search_annotation.adult)\n\nfor label in response.label_annotations:\n print(label.description)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render
from .. login.models import *
def user(request):
context = {
"users" : User.objects.all(),
"user_level" : User.objects.get(id = request.session['user_id'])
}
return render(request, 'dashboard/user.html', context)
def admin(request):
context = {
"users" : User.objects.all(),
"user_level" : User.objects.get(id = request.session['user_id'])
}
return render(request, 'dashboard/admin.html', context)
|
normal
|
{
"blob_id": "3d737d0ee9c3af1f8ebe4c6998ad30fa34f42856",
"index": 570,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom ..login.models import *\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom .. login.models import *\n\ndef user(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/user.html', context)\n\ndef admin(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/admin.html', context)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def twoSum(self, nums, target):
d = dict([(nums[i], i) for i in range(len(nums))])
for n in range(len(nums)):
dif = target - nums[n]
if dif in d and d[dif] != n:
return n, d[dif]
return 'No solution available'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def twoSum(self, nums, target):
d = dict([(nums[i], i) for i in range(len(nums))])
for n in range(len(nums)):
dif = target - nums[n]
if dif in d and d[dif] != n:
return n, d[dif]
return 'No solution available'
<|reserved_special_token_0|>
print(a)
<|reserved_special_token_1|>
class Solution:
def twoSum(self, nums, target):
d = dict([(nums[i], i) for i in range(len(nums))])
for n in range(len(nums)):
dif = target - nums[n]
if dif in d and d[dif] != n:
return n, d[dif]
return 'No solution available'
s = Solution()
nums = [7, 2, 7, 15]
target = 14
a = s.twoSum(nums, target)
print(a)
<|reserved_special_token_1|>
# Given an array of integers, return indices of the two numbers such that they add up to a specific target.
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
# Example:
# Given nums = [2, 7, 11, 15], target = 9,
# Because nums[0] + nums[1] = 2 + 7 = 9,
# return [0, 1].
class Solution:
def twoSum(self, nums, target):
# create a dictionary using the values of the array as the dictionary keys, and the indices of the array as the dictionary values
d = dict([(nums[i],i) for i in range(len(nums))])
# iterate through the array
for n in range(len(nums)):
# find the difference between the target number and the integer in the array
dif = target - nums[n]
# find the difference as a key in the dictionary, be careful that the dictionary's value is not the same as the array's indice (can happen when the difference is half of the target number, but there are not two halves in the array)
if dif in d and d[dif] != n:
# if found, return the two indices of the numbers that add up to the target number
return (n,d[dif])
# just in case there is no solution, even though the problem allows for the assumption that there is always one solution
return ("No solution available")
# initilize a test case
s = Solution()
nums = [7,2,7,15]
target = 14
a = s.twoSum(nums,target)
print(a)
# create a dictionary that stores the indices as the keys and the integers as the values
# iterate through the array, attempting to find the target minus the integer as a key in the dictionary
# return the indice of the integer and the value of the key
# watch out for arrays that involve duplicates, such as [3,3,7,2], target 6
|
flexible
|
{
"blob_id": "16cc85324b555f0cfec8d577b776b86872578822",
"index": 6016,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def twoSum(self, nums, target):\n d = dict([(nums[i], i) for i in range(len(nums))])\n for n in range(len(nums)):\n dif = target - nums[n]\n if dif in d and d[dif] != n:\n return n, d[dif]\n return 'No solution available'\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def twoSum(self, nums, target):\n d = dict([(nums[i], i) for i in range(len(nums))])\n for n in range(len(nums)):\n dif = target - nums[n]\n if dif in d and d[dif] != n:\n return n, d[dif]\n return 'No solution available'\n\n\n<mask token>\nprint(a)\n",
"step-4": "class Solution:\n\n def twoSum(self, nums, target):\n d = dict([(nums[i], i) for i in range(len(nums))])\n for n in range(len(nums)):\n dif = target - nums[n]\n if dif in d and d[dif] != n:\n return n, d[dif]\n return 'No solution available'\n\n\ns = Solution()\nnums = [7, 2, 7, 15]\ntarget = 14\na = s.twoSum(nums, target)\nprint(a)\n",
"step-5": "# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n# Example:\n# Given nums = [2, 7, 11, 15], target = 9,\n# Because nums[0] + nums[1] = 2 + 7 = 9,\n# return [0, 1].\n\nclass Solution:\n def twoSum(self, nums, target):\n # create a dictionary using the values of the array as the dictionary keys, and the indices of the array as the dictionary values\n d = dict([(nums[i],i) for i in range(len(nums))])\n # iterate through the array\n for n in range(len(nums)):\n # find the difference between the target number and the integer in the array\n dif = target - nums[n]\n # find the difference as a key in the dictionary, be careful that the dictionary's value is not the same as the array's indice (can happen when the difference is half of the target number, but there are not two halves in the array)\n if dif in d and d[dif] != n:\n # if found, return the two indices of the numbers that add up to the target number\n return (n,d[dif])\n # just in case there is no solution, even though the problem allows for the assumption that there is always one solution\n return (\"No solution available\")\n\n# initilize a test case\ns = Solution()\nnums = [7,2,7,15]\ntarget = 14\na = s.twoSum(nums,target)\nprint(a)\n\n# create a dictionary that stores the indices as the keys and the integers as the values\n# iterate through the array, attempting to find the target minus the integer as a key in the dictionary\n# return the indice of the integer and the value of the key\n# watch out for arrays that involve duplicates, such as [3,3,7,2], target 6",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''import math
x = 5
print("sqrt of 5 is", math.sqrt(64))
str1 = "bollywood"
str2 = 'ody'
if str2 in str1:
print("String found")
else:
print("String not found")
print(10+20)'''
#try:
#block of code
#except Exception l:
#block of code
#else:
#this code executes if except block is executed
try:
fh = open("testfile.txt", "w")
fh.write("This is my test file for exception handling! !")
except IOError:
print("Error: can\'t find file or read data")
else:
print("written content in the file successfully")
fh = open("testfile.txt", "r+")
print(fh.read())
fh.close()
print(fh.closed)
try:
fileptr = open("file.txt", "w")
try:
fileptr.write("Hi I am good")
finally:
fileptr.close()
print("file.closed")
except:
print("Error")
else:
print("inside else block")
try:
age = int(input("Enter the age?"))
if age<18:
raise ValueError
else:
print("the age is valid")
except ValueError:
print("The age is not valid")
|
normal
|
{
"blob_id": "c5b40b373953a2375eeca453a65c49bdbb8715f1",
"index": 6586,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n fh = open('testfile.txt', 'w')\n fh.write('This is my test file for exception handling! !')\nexcept IOError:\n print(\"Error: can't find file or read data\")\nelse:\n print('written content in the file successfully')\n fh = open('testfile.txt', 'r+')\n print(fh.read())\n fh.close()\n print(fh.closed)\ntry:\n fileptr = open('file.txt', 'w')\n try:\n fileptr.write('Hi I am good')\n finally:\n fileptr.close()\n print('file.closed')\nexcept:\n print('Error')\nelse:\n print('inside else block')\ntry:\n age = int(input('Enter the age?'))\n if age < 18:\n raise ValueError\n else:\n print('the age is valid')\nexcept ValueError:\n print('The age is not valid')\n",
"step-3": "'''import math\nx = 5\nprint(\"sqrt of 5 is\", math.sqrt(64))\n\nstr1 = \"bollywood\"\n\nstr2 = 'ody'\n\nif str2 in str1:\n print(\"String found\")\nelse:\n print(\"String not found\")\n\n print(10+20)'''\n\n#try:\n #block of code\n#except Exception l:\n #block of code\n#else:\n #this code executes if except block is executed\n\ntry:\n fh = open(\"testfile.txt\", \"w\")\n fh.write(\"This is my test file for exception handling! !\")\n\nexcept IOError:\n print(\"Error: can\\'t find file or read data\")\nelse:\n\n print(\"written content in the file successfully\")\n\n fh = open(\"testfile.txt\", \"r+\")\n print(fh.read())\n fh.close()\n print(fh.closed)\n\ntry:\n fileptr = open(\"file.txt\", \"w\")\n try:\n fileptr.write(\"Hi I am good\")\n\n\n finally:\n fileptr.close()\n print(\"file.closed\")\nexcept:\n print(\"Error\")\nelse:\n print(\"inside else block\")\n\n\ntry:\n age = int(input(\"Enter the age?\"))\n if age<18:\n raise ValueError\n else:\n print(\"the age is valid\")\nexcept ValueError:\n print(\"The age is not valid\")\n\n\n\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def my_kron(A, B):
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]
return D.reshape(newshape)
def _identity(x):
return x
<|reserved_special_token_0|>
def sde_fn1(x, _):
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
<|reserved_special_token_0|>
class T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
<|reserved_special_token_0|>
def _multivariate_t_to_scipy(df, loc, tril):
if scipy.__version__ < '1.6.0':
pytest.skip(
'Multivariate Student-T distribution is not available in scipy < 1.6'
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
<|reserved_special_token_0|>
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
<|reserved_special_token_0|>
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,
math.pi])
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0]), np.array([1.0])
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc
.ndim)
super().__init__(base_dist, skewness, **kwargs)
<|reserved_special_token_0|>
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc
.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(self:
SineSkewedVonMisesBatched, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(self.component_distribution, loc=loc,
scale=scale)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix
=covariance_matrix)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,
scale_ in zip(locs, scales)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(mixing_distribution=mixing_distribution,
component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in
self.component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [dist.MultivariateNormal(loc=loc_,
covariance_matrix=covariance_matrix) for loc_,
covariance_matrix in zip(locs, covariance_matrices)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(mixing_distribution=mixing_distribution,
component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [vmap_over(d, loc=locs) for d in self.
component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {'rate': constraints.positive, 'gate_logits':
constraints.real}
pytree_data_fields = 'rate',
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=
validate_args)
<|reserved_special_token_0|>
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=
vmap_over(self.base_dist, loc=loc, scale=scale))
d.loc = loc
d.scale = scale
return d
class _SparseCAR(dist.CAR):
reparametrized_params = ['loc', 'correlation', 'conditional_precision']
def __init__(self, loc, correlation, conditional_precision, adj_matrix,
*, is_sparse=True, validate_args=None):
super().__init__(loc, correlation, conditional_precision,
adj_matrix, is_sparse=True, validate_args=validate_args)
<|reserved_special_token_0|>
def get_sp_dist(jax_dist):
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
<|reserved_special_token_0|>
def _is_batched_multivariate(jax_dist):
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
<|reserved_special_token_0|>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.
event_shape)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
size = 1 if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip(
"scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.
MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.
covariance_matrix), rtol=1e-06)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, 'shape', ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist
).infer_shapes(*shapes)
except NotImplementedError:
pytest.skip(
f'{type(jax_dist).__name__}.infer_shapes() is not implemented')
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(
constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.
reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
<|reserved_special_token_0|>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [
'concentration1', 'concentration0'], 'BetaProportion': ['mean',
'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],
'InverseGamma': ['concentration'], 'LKJ': ['concentration'],
'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.
__name__, [])
dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1
:] if inspect.isclass(jax_dist) else inspect.getfullargspec(
jax_dist)[0])]
params_dict = dict(zip(dist_args[:len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [p for p in jax_class.reparametrized_params if
p not in gamma_derived_params]
if not reparametrized_params:
pytest.skip('{} not reparametrized.'.format(jax_class.__name__))
nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in
reparametrized_params}
repara_params = tuple(v for k, v in params_dict.items() if k in
reparametrized_params)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).
sample(key=rng_key))
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 0.001
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(
repara_params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(
repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,
atol=0.03)
@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.
Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,
(0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,
(5.0, 2.0, 4.0))])
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z ** 2).mean(0)
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean ** 2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
<|reserved_special_token_0|>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
@pytest.mark.parametrize('jit', [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples
).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (dist.LeftTruncatedDistribution, dist.
RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
loc, scale, low, high = params[0].loc, params[0].scale, params[
1], params[2]
else:
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -
sp_dist.cdf(low))
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,
atol=1e-05)
return
pytest.skip('no corresponding scipy distn.')
if _is_batched_multivariate(jax_dist):
pytest.skip('batching not allowed in multivariate distns.')
if jax_dist.event_shape and prepend_shape:
pytest.skip(
'batched samples cannot be scored by multivariate distributions.')
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
if "The input vector 'x' must lie within the normal simplex." in str(e
):
samples = jax.device_get(samples).astype('float64')
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)
def test_mixture_log_prob():
gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist
.Normal(0, 1).expand([2]))
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.
Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])
@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip(
'skip testing cdf/icdf methods of multivariate distributions')
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05
if d.shape() == () and not d.is_discrete:
assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.
log_prob(samples)), atol=1e-05, rtol=rtol)
assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(
-d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,
rtol=1e-05)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)
except NotImplementedError:
pass
if not sp_dist:
pytest.skip('no corresponding scipy distn.')
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)
except NotImplementedError:
pass
<mask token>
@pytest.mark.parametrize('dimension', [2, 3, 5])
@pytest.mark.parametrize('concentration', [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')
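    # reconstruct the density by change of variables: Beta log-density of the CVine construction
    # minus the log-determinants of the affine map to partial correlations and of the
    # signed stick-breaking transform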
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
sample = signed_stick_breaking_tril(partial_correlation)
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(
partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky
).log_abs_det_jacobian(unconstrained, sample)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = (beta_log_prob - affine_logdet -
signed_stick_breaking_logdet)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06
)
def test_zero_inflated_logits_probs_agree():
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(0, 20, (1000, 100))
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
<mask token>
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
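    # Monte Carlo estimate: marginalize Beta-distributed probs out of the Binomial pmf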
probs = np.random.beta(concentration1, concentration0, size=(
num_samples,) + shape)
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count
).log_prob(value)
assert_allclose(actual, expected, rtol=0.02)
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
    event_shape = (3,)
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1
,) * len(batch_shape) + event_shape)
num_samples = 100000
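    # Monte Carlo estimate: marginalize Dirichlet-distributed probs out of the Multinomial pmf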
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (
num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(
value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
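    # Monte Carlo estimate: marginalize the Gamma-distributed rate out of the Poisson pmf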
poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(
num_samples,) + shape)
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
pytest.skip('we have separated tests for LKJCholesky distribution')
if jax_dist is _ImproperWrapper:
pytest.skip(
'no param for ImproperUniform to test for log_prob gradient')
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 0.001
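    # compare autodiff gradients of log_prob against central finite differences in each parameter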
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
continue
if jax_dist is _SparseCAR and i == 3:
continue
if isinstance(params[i], dist.Distribution):
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,
jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,
atol=0.01)
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,
_Gaussian2DMixture, _GeneralMixture, _General2DMixture):
pytest.skip(f'{jax_dist.__name__} is a function, not a class')
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky
) and dist_args[i] != 'concentration':
continue
if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':
continue
if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i
] == 'base_dist':
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':
continue
if jax_dist is dist.SineBivariateVonMises and dist_args[i
] == 'weighted_correlation':
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(
params[i]), key_gen)
valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(
params[i]), key_gen)
if jax_dist is dist.MultivariateStudentT:
valid_params[0] += 1
if jax_dist is dist.LogUniform:
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
if not dependent_constraint and (jax_dist is not _ImproperWrapper and
'SineSkewed' not in jax_dist.__name__):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and
prepend_shape):
valid_samples = gen_values_within_bounds(d.support, size=
prepend_shape + d.batch_shape + d.event_shape)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,
rtol=1e-05)
oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +
d.batch_shape + d.event_shape)
with pytest.warns(UserWarning, match='Out-of-support'):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match='Out-of-support'):
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match='got invalid'):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match='Out-of-support'):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(
data).sum()
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data
).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-06)
assert_allclose(grad_fx, grad_gx, atol=0.0001)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(ValueError,
match='^BetaProportion distribution got invalid mean parameter\\.$'):
dist.BetaProportion(1.0, 1.0)
<mask token>
@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,
constraints.corr_matrix, constraints.greater_than(2), constraints.
interval(-3, 5), constraints.l1_ball, constraints.less_than(1),
constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector, constraints.positive, constraints.
positive_definite, constraints.positive_ordered_vector, constraints.
real, constraints.real_vector, constraints.simplex, constraints.
softplus_positive, constraints.softplus_lower_cholesky, constraints.
unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.
__class__)
@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,
3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=
jnp.bool_))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-05, rtol=1e-05)
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
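    # for a single event, validate log|det J| against an explicit Jacobian computation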
if len(shape) == event_dim:
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)
[:, :-1])[1]
elif constraint in [constraints.real_vector, constraints.
ordered_vector, constraints.positive_ordered_vector,
constraints.l1_ball]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x),
diagonal=-1)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
matrix = matrix + jnp.swapaxes(matrix, -2, -1
) + jnp.identity(matrix.shape[-1])
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
elif constraint in [constraints.lower_cholesky, constraints.
scaled_unit_lower_cholesky, constraints.positive_definite,
constraints.softplus_lower_cholesky]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x))
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(
jnp.diag(matrix))
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)
assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.
array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (
SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np
.array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(
[biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv]), (5,))])
@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1,
3), (5, 3)])
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-06, rtol=0.0001)
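    # the inverse is cached: inv.inv round-trips to the original and repeated .inv access
    # returns the identical object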
assert transform.inv.inv is transform
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-06)
assert_allclose(actual, -inv_expected, atol=1e-06)
<mask token>
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z
) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
<mask token>
@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),
dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms
.PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(random
.PRNGKey(1))
assert_allclose(transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample))
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=
100000, key=random.PRNGKey(11)):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
            '{} sampling method taken from upstream, no need to test generated samples.'
.format(jax_dist.__name__))
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)
@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])
@pytest.mark.parametrize('sample_shape', [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples
).shape == sample_shape + new_batch_shape
assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,
) + new_batch_shape
if prepend_shape:
with pytest.raises(ValueError, match=
'Cannot broadcast distribution of shape'):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
<mask token>
@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)
), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
    full_shape = (4, 1, 1, 1, 6)
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
<mask token>
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(
event_shape))))
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)
]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
<mask token>
def test_expand_no_unnecessary_batch_shape_expansion():
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
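        # a tree_flatten/tree_unflatten round trip should reproduce the same expanded distribution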
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
        # jitting a function that returns an ExpandedDistribution should give the same
        # shapes and parameters as the non-jitted call, for every arg in the loop
        def bs(arg):
            return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
        d = bs(arg)
        dj = jax.jit(bs)(arg)
        assert isinstance(d, dist.ExpandedDistribution)
        assert isinstance(dj, dist.ExpandedDistribution)
        assert d.batch_shape == dj.batch_shape
        assert d.base_dist.batch_shape == dj.base_dist.batch_shape
        assert d.base_dist.event_shape == dj.base_dist.event_shape
        assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
        assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
<mask token>
@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.
Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.
Normal), (dist.Weibull, dist.Gamma)])
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f'Missing pattern for param {k}.')
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
actual = kl_divergence(p, q)
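    # Monte Carlo estimate of KL(p || q) = E_p[log p(x) - log q(x)]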
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
<mask token>
def _tree_equal(t1, t2):
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[:len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
return
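    # vmap cases: batch every vmappable parameter at once, then each parameter individually,
    # with out_axes given either as 0 or constructed via vmap_over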
in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for
i in range(len(params))), 0), *(([(0 if i == idx else None) for i in
range(len(params))], 0) for idx in vmappable_param_idxs if params[
idx] is not None), *(([(0 if i == idx else None) for i in range(len
(params))], vmap_over(d, **{param_names[idx]: 0})) for idx in
vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==
idx else None) for i in range(len(params))], vmap_over(d, **{
param_names[idx]: 1})) for idx in vmappable_param_idxs if
isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).
ndim > 0 and jax_dist is not _GeneralMixture)]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),
arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,
in_axes)]
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes
)(*batched_params)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10
).sample(key)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-06)
def test_normal_log_cdf():
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)
<mask token>
def test_sample_truncated_normal_in_tail():
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
<mask token>
<mask token>
def my_kron(A, B):
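    # batched Kronecker product: outer products of blocks reshaped into a block matrix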
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
    newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])
return D.reshape(newshape)
def _identity(x):
return x
<mask token>
def sde_fn1(x, _):
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
<mask token>
class T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):
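    # test-case record pairing a NumPyro distribution with its SciPy counterpart (if any) and parameters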
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
<mask token>
def _multivariate_t_to_scipy(df, loc, tril):
    # plain string comparison breaks for scipy >= 1.10, so compare numeric version components
    if tuple(int(v) for v in scipy.__version__.split('.')[:2]) < (1, 6):
pytest.skip(
'Multivariate Student-T distribution is not available in scipy < 1.6'
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
<mask token>
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
<mask token>
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,
math.pi])
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0]), np.array([1.0])
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc
.ndim)
super().__init__(base_dist, skewness, **kwargs)
<mask token>
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc
.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(self:
SineSkewedVonMisesBatched, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(self.component_distribution, loc=loc,
scale=scale)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix
=covariance_matrix)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,
scale_ in zip(locs, scales)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
        super().__init__(mixing_distribution=mixing_distribution,
            component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in
self.component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [dist.MultivariateNormal(loc=loc_,
covariance_matrix=covariance_matrix) for loc_,
covariance_matrix in zip(locs, covariance_matrices)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
        super().__init__(mixing_distribution=mixing_distribution,
            component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [vmap_over(d, loc=locs) for d in self.
component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
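        # sample by drawing uniformly in unconstrained space and pushing through biject_to(support)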
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {'rate': constraints.positive, 'gate_logits':
constraints.real}
    pytree_data_fields = ('rate',)
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=
validate_args)
<mask token>
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=
vmap_over(self.base_dist, loc=loc, scale=scale))
d.loc = loc
d.scale = scale
return d
class _SparseCAR(dist.CAR):
reparametrized_params = ['loc', 'correlation', 'conditional_precision']
def __init__(self, loc, correlation, conditional_precision, adj_matrix,
*, is_sparse=True, validate_args=None):
super().__init__(loc, correlation, conditional_precision,
adj_matrix, is_sparse=True, validate_args=validate_args)
<mask token>
def get_sp_dist(jax_dist):
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
<mask token>
def _is_batched_multivariate(jax_dist):
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.
event_shape)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
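        # scipy's multivariate t sampler needs a non-empty size, so fall back to 1 for scalar draws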
size = 1 if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip(
"scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.
MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.
covariance_matrix), rtol=1e-06)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, 'shape', ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist
).infer_shapes(*shapes)
except NotImplementedError:
pytest.skip(
f'{type(jax_dist).__name__}.infer_shapes() is not implemented')
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(
constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
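        # only continuous distributions (or Delta) should advertise reparameterized sampling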
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.
reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [
'concentration1', 'concentration0'], 'BetaProportion': ['mean',
'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],
'InverseGamma': ['concentration'], 'LKJ': ['concentration'],
'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.
__name__, [])
dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1
:] if inspect.isclass(jax_dist) else inspect.getfullargspec(
jax_dist)[0])]
params_dict = dict(zip(dist_args[:len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [p for p in jax_class.reparametrized_params if
p not in gamma_derived_params]
if not reparametrized_params:
pytest.skip('{} not reparametrized.'.format(jax_class.__name__))
nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in
reparametrized_params}
repara_params = tuple(v for k, v in params_dict.items() if k in
reparametrized_params)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).
sample(key=rng_key))
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 0.001
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(
repara_params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(
repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,
atol=0.03)
@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.
Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,
(0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,
(5.0, 2.0, 4.0))])
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z ** 2).mean(0)
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean ** 2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
@pytest.mark.parametrize('jit', [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples
).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (dist.LeftTruncatedDistribution, dist.
RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
loc, scale, low, high = params[0].loc, params[0].scale, params[
1], params[2]
else:
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -
sp_dist.cdf(low))
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,
atol=1e-05)
return
pytest.skip('no corresponding scipy distn.')
if _is_batched_multivariate(jax_dist):
pytest.skip('batching not allowed in multivariate distns.')
if jax_dist.event_shape and prepend_shape:
pytest.skip(
'batched samples cannot be scored by multivariate distributions.')
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
if "The input vector 'x' must lie within the normal simplex." in str(e
):
samples = jax.device_get(samples).astype('float64')
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)
def test_mixture_log_prob():
gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist
.Normal(0, 1).expand([2]))
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.
Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])
@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip(
'skip testing cdf/icdf methods of multivariate distributions')
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05
if d.shape() == () and not d.is_discrete:
assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.
log_prob(samples)), atol=1e-05, rtol=rtol)
assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(
-d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,
rtol=1e-05)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)
except NotImplementedError:
pass
if not sp_dist:
pytest.skip('no corresponding scipy distn.')
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)
except NotImplementedError:
pass
<mask token>
@pytest.mark.parametrize('dimension', [2, 3, 5])
@pytest.mark.parametrize('concentration', [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
sample = signed_stick_breaking_tril(partial_correlation)
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(
partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky
).log_abs_det_jacobian(unconstrained, sample)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = (beta_log_prob - affine_logdet -
signed_stick_breaking_logdet)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06
)
def test_zero_inflated_logits_probs_agree():
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(0, 20, (1000, 100))
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
<mask token>
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
probs = np.random.beta(concentration1, concentration0, size=(
num_samples,) + shape)
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count
).log_prob(value)
assert_allclose(actual, expected, rtol=0.02)
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
    event_shape = (3,)
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1
,) * len(batch_shape) + event_shape)
num_samples = 100000
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (
num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(
value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(
num_samples,) + shape)
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
pytest.skip('we have separated tests for LKJCholesky distribution')
if jax_dist is _ImproperWrapper:
pytest.skip(
'no param for ImproperUniform to test for log_prob gradient')
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 0.001
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
continue
if jax_dist is _SparseCAR and i == 3:
continue
if isinstance(params[i], dist.Distribution):
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,
jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,
atol=0.01)
<mask token>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,
_Gaussian2DMixture, _GeneralMixture, _General2DMixture):
pytest.skip(f'{jax_dist.__name__} is a function, not a class')
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky
) and dist_args[i] != 'concentration':
continue
if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':
continue
if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i
] == 'base_dist':
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':
continue
if jax_dist is dist.SineBivariateVonMises and dist_args[i
] == 'weighted_correlation':
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(
params[i]), key_gen)
valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(
params[i]), key_gen)
if jax_dist is dist.MultivariateStudentT:
valid_params[0] += 1
if jax_dist is dist.LogUniform:
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
if not dependent_constraint and (jax_dist is not _ImproperWrapper and
'SineSkewed' not in jax_dist.__name__):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and
prepend_shape):
valid_samples = gen_values_within_bounds(d.support, size=
prepend_shape + d.batch_shape + d.event_shape)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,
rtol=1e-05)
oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +
d.batch_shape + d.event_shape)
with pytest.warns(UserWarning, match='Out-of-support'):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match='Out-of-support'):
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match='got invalid'):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match='Out-of-support'):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(
data).sum()
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data
).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-06)
assert_allclose(grad_fx, grad_gx, atol=0.0001)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(ValueError,
match='^BetaProportion distribution got invalid mean parameter\\.$'):
dist.BetaProportion(1.0, 1.0)
<mask token>
@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,
constraints.corr_matrix, constraints.greater_than(2), constraints.
interval(-3, 5), constraints.l1_ball, constraints.less_than(1),
constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector, constraints.positive, constraints.
positive_definite, constraints.positive_ordered_vector, constraints.
real, constraints.real_vector, constraints.simplex, constraints.
softplus_positive, constraints.softplus_lower_cholesky, constraints.
unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.
__class__)
@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,
3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=
jnp.bool_))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-05, rtol=1e-05)
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
if len(shape) == event_dim:
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)
[:, :-1])[1]
elif constraint in [constraints.real_vector, constraints.
ordered_vector, constraints.positive_ordered_vector,
constraints.l1_ball]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x),
diagonal=-1)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
matrix = matrix + jnp.swapaxes(matrix, -2, -1
) + jnp.identity(matrix.shape[-1])
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
elif constraint in [constraints.lower_cholesky, constraints.
scaled_unit_lower_cholesky, constraints.positive_definite,
constraints.softplus_lower_cholesky]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x))
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(
jnp.diag(matrix))
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)
assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.
array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (
SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np
.array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(
[biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv]), (5,))])
@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1,
3), (5, 3)])
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-06, rtol=0.0001)
assert transform.inv.inv is transform
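    # repeated access to .inv must return the identical (cached) object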
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-06)
assert_allclose(actual, -inv_expected, atol=1e-06)
<|reserved_special_token_0|>
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z
) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
<|reserved_special_token_0|>
@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),
dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms
.PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(random
.PRNGKey(1))
assert_allclose(transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample))
<|reserved_special_token_0|>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=
100000, key=random.PRNGKey(11)):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
            '{} sampling method taken from upstream, no need to test generated samples.'
.format(jax_dist.__name__))
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
<|reserved_special_token_0|>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)
@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])
@pytest.mark.parametrize('sample_shape', [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples
).shape == sample_shape + new_batch_shape
assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,
) + new_batch_shape
if prepend_shape:
with pytest.raises(ValueError, match=
'Cannot broadcast distribution of shape'):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
<|reserved_special_token_0|>
@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)
), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
    full_shape = (4, 1, 1, 1, 6)
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
<|reserved_special_token_0|>
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(
event_shape))))
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)
]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
<|reserved_special_token_0|>
def test_expand_no_unnecessary_batch_shape_expansion():
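    # flatten/unflatten and jit round-trips must not broadcast the base distribution's parameters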
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
def bs(arg):
return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
d = bs(arg)
dj = jax.jit(bs)(arg)
assert isinstance(d, dist.ExpandedDistribution)
assert isinstance(dj, dist.ExpandedDistribution)
assert d.batch_shape == dj.batch_shape
assert d.base_dist.batch_shape == dj.base_dist.batch_shape
assert d.base_dist.event_shape == dj.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(
shape)
q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(
shape)
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.
Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.
Normal), (dist.Weibull, dist.Gamma)])
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f'Missing pattern for param {k}.')
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
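    # Binomial sampling with probs=0 must not loop forever under vmap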
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
<|reserved_special_token_0|>
def _tree_equal(t1, t2):
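    # reduce an elementwise allclose/equality check over an arbitrary pytree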
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[:len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
return
in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for
i in range(len(params))), 0), *(([(0 if i == idx else None) for i in
range(len(params))], 0) for idx in vmappable_param_idxs if params[
idx] is not None), *(([(0 if i == idx else None) for i in range(len
(params))], vmap_over(d, **{param_names[idx]: 0})) for idx in
vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==
idx else None) for i in range(len(params))], vmap_over(d, **{
param_names[idx]: 1})) for idx in vmappable_param_idxs if
isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).
ndim > 0 and jax_dist is not _GeneralMixture)]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),
arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,
in_axes)]
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes
)(*batched_params)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
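    # total_count is an abstract tracer under jit; total_count_max makes sampling feasible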
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10
).sample(key)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-06)
def test_normal_log_cdf():
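    # compare log_cdf/cdf against the jax_norm reference implementation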
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)
@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], [-
5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]
)
def test_truncated_normal_log_prob_in_tail(value):
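    # log-probabilities deep in the truncated tail should match scipy's truncnorm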
loc = 1.35
scale = jnp.geomspace(0.01, 1, 10)
low, high = -20, -1.0
a, b = (low - loc) / scale, (high - loc) / scale
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high
).log_prob(value)
jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
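    # sampling far in the tail should not produce infinite values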
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def my_kron(A, B):
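    # batched Kronecker product over the last two dimensions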
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]
return D.reshape(newshape)
def _identity(x):
return x
<|reserved_special_token_0|>
def sde_fn1(x, _):
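    # linear drift and constant diffusion term for an SDE test function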
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
<|reserved_special_token_0|>
class T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
def _mvn_to_scipy(loc, cov, prec, tril):
jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _multivariate_t_to_scipy(df, loc, tril):
    # compare the version numerically; plain string comparison misorders e.g. '1.10' vs '1.6'
    if tuple(int(p) for p in scipy.__version__.split('.')[:2]) < (1, 6):
pytest.skip(
'Multivariate Student-T distribution is not available in scipy < 1.6'
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
<|reserved_special_token_0|>
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
<|reserved_special_token_0|>
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,
math.pi])
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0]), np.array([1.0])
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc
.ndim)
super().__init__(base_dist, skewness, **kwargs)
<|reserved_special_token_0|>
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc
.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(self:
SineSkewedVonMisesBatched, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(self.component_distribution, loc=loc,
scale=scale)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix
=covariance_matrix)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,
scale_ in zip(locs, scales)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(mixing_distribution=mixing_distribution,
component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in
self.component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [dist.MultivariateNormal(loc=loc_,
covariance_matrix=covariance_matrix) for loc_,
covariance_matrix in zip(locs, covariance_matrices)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(mixing_distribution=mixing_distribution,
component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [vmap_over(d, loc=locs) for d in self.
component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
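        # draw unconstrained noise and push it through the bijection onto the support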
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {'rate': constraints.positive, 'gate_logits':
constraints.real}
    pytree_data_fields = ('rate',)
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=
validate_args)
@vmap_over.register
def _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,
rate=None, gate_logits=None):
dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,
base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=
gate_logits, gate=gate_logits)
dist_axes.rate = rate
return dist_axes
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=
vmap_over(self.base_dist, loc=loc, scale=scale))
d.loc = loc
d.scale = scale
return d
class _SparseCAR(dist.CAR):
reparametrized_params = ['loc', 'correlation', 'conditional_precision']
def __init__(self, loc, correlation, conditional_precision, adj_matrix,
*, is_sparse=True, validate_args=None):
super().__init__(loc, correlation, conditional_precision,
adj_matrix, is_sparse=True, validate_args=validate_args)
<|reserved_special_token_0|>
def get_sp_dist(jax_dist):
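    # look up the matching scipy distribution, walking the MRO for subclasses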
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
<|reserved_special_token_0|>
def _is_batched_multivariate(jax_dist):
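    # multivariate distributions that also carry a non-trivial batch shape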
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
def gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):
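    # generate random values that satisfy the given constraint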
eps = 1e-06
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size)
elif isinstance(constraint, constraints.greater_than):
return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.randint(key, size, lower_bound, upper_bound + 1)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound + random.poisson(key, np.array(5),
shape=size)
elif isinstance(constraint, constraints.interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=lower_bound, maxval=upper_bound
)
elif constraint in (constraints.real, constraints.real_vector):
return random.normal(key, size)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.
upper_bound, shape=size[:-1])
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (
size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))
elif constraint is constraints.corr_matrix:
cholesky = signed_stick_breaking_tril(random.uniform(key, size[:-2] +
(size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return jnp.tril(random.uniform(key, size))
elif constraint is constraints.positive_definite:
x = random.normal(key, size)
return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x - random.normal(key, size[:-1] + (1,))
elif isinstance(constraint, constraints.independent):
return gen_values_within_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
return x / jnp.linalg.norm(x, axis=-1)
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [0, (-1) ** sign * 0.5]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError('{} not implemented.'.format(constraint))
def gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):
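    # generate random values that violate the given constraint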
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size) - 2
elif isinstance(constraint, constraints.greater_than):
return constraint.lower_bound - jnp.exp(random.normal(key, size))
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
return random.randint(key, size, lower_bound - 1, lower_bound)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound - random.poisson(key, np.array(5),
shape=size)
elif isinstance(constraint, constraints.interval):
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=upper_bound, maxval=
upper_bound + 1.0)
elif constraint in [constraints.real, constraints.real_vector]:
return lax.full(size, np.nan)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]
) + 0.01
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.
upper_bound, shape=size[:-1]) + 1
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (
size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01
elif constraint is constraints.corr_matrix:
cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key,
size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return random.uniform(key, size)
elif constraint is constraints.positive_definite:
return random.normal(key, size)
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x[..., ::-1]
elif isinstance(constraint, constraints.independent):
return gen_values_outside_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)
return 2 * x
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError('{} not implemented.'.format(constraint))
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.
event_shape)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
size = 1 if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip(
"scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.
MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.
covariance_matrix), rtol=1e-06)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, 'shape', ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist
).infer_shapes(*shapes)
except NotImplementedError:
pytest.skip(
f'{type(jax_dist).__name__}.infer_shapes() is not implemented')
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(
constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.
reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
@pytest.mark.parametrize('batch_shape', [(), (4,), (3, 2)])
def test_unit(batch_shape):
log_factor = random.normal(random.PRNGKey(0), batch_shape)
d = dist.Unit(log_factor=log_factor)
x = d.sample(random.PRNGKey(1))
assert x.shape == batch_shape + (0,)
assert (d.log_prob(x) == log_factor).all()
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [
'concentration1', 'concentration0'], 'BetaProportion': ['mean',
'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],
'InverseGamma': ['concentration'], 'LKJ': ['concentration'],
'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.
__name__, [])
dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1
:] if inspect.isclass(jax_dist) else inspect.getfullargspec(
jax_dist)[0])]
params_dict = dict(zip(dist_args[:len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [p for p in jax_class.reparametrized_params if
p not in gamma_derived_params]
if not reparametrized_params:
pytest.skip('{} not reparametrized.'.format(jax_class.__name__))
nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in
reparametrized_params}
repara_params = tuple(v for k, v in params_dict.items() if k in
reparametrized_params)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).
sample(key=rng_key))
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 0.001
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(
repara_params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(
repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,
atol=0.03)
@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.
Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,
(0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,
(5.0, 2.0, 4.0))])
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z ** 2).mean(0)
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean ** 2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
<|reserved_special_token_0|>
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
@pytest.mark.parametrize('jit', [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples
).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (dist.LeftTruncatedDistribution, dist.
RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
loc, scale, low, high = params[0].loc, params[0].scale, params[
1], params[2]
else:
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -
sp_dist.cdf(low))
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,
atol=1e-05)
return
pytest.skip('no corresponding scipy distn.')
if _is_batched_multivariate(jax_dist):
pytest.skip('batching not allowed in multivariate distns.')
if jax_dist.event_shape and prepend_shape:
pytest.skip(
'batched samples cannot be scored by multivariate distributions.')
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
if "The input vector 'x' must lie within the normal simplex." in str(e
):
samples = jax.device_get(samples).astype('float64')
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)
def test_mixture_log_prob():
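    # a mixture of identical components must score exactly like a single component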
gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist
.Normal(0, 1).expand([2]))
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.
Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])
@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip(
'skip testing cdf/icdf methods of multivariate distributions')
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05
if d.shape() == () and not d.is_discrete:
assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.
log_prob(samples)), atol=1e-05, rtol=rtol)
assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(
-d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,
rtol=1e-05)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)
except NotImplementedError:
pass
if not sp_dist:
pytest.skip('no corresponding scipy distn.')
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)
except NotImplementedError:
pass
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)
def test_gof(jax_dist, sp_dist, params):
if 'Improper' in jax_dist.__name__:
pytest.skip('distribution has improper .log_prob()')
if 'LKJ' in jax_dist.__name__:
pytest.xfail('incorrect submanifold scaling')
if jax_dist is dist.EulerMaruyama:
d = jax_dist(*params)
if d.event_dim > 1:
pytest.skip(
                'Skipping the EulerMaruyama test when the event shape is non-trivial.')
num_samples = 10000
if 'BetaProportion' in jax_dist.__name__:
num_samples = 20000
rng_key = random.PRNGKey(0)
d = jax_dist(*params)
samples = d.sample(key=rng_key, sample_shape=(num_samples,))
probs = np.exp(d.log_prob(samples))
dim = None
if jax_dist is dist.ProjectedNormal:
dim = samples.shape[-1] - 1
probs = probs.reshape(num_samples, -1)
samples = samples.reshape(probs.shape + d.event_shape)
if 'Dirichlet' in jax_dist.__name__:
samples = samples[..., :-1]
for b in range(probs.shape[1]):
try:
gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
except InvalidTest:
pytest.skip('expensive test')
else:
assert gof > TEST_FAILURE_RATE
<|reserved_special_token_0|>
def _tril_cholesky_to_tril_corr(x):
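    # map the strict lower triangle of a Cholesky factor to the strict lower
    # triangle of the corresponding correlation matrix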
w = vec_to_tril_matrix(x, diagonal=-1)
diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))
cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])
corr = jnp.matmul(cholesky, cholesky.T)
return matrix_to_tril_vec(corr, diagonal=-1)
@pytest.mark.parametrize('dimension', [2, 3, 5])
def test_log_prob_LKJCholesky_uniform(dimension):
d = dist.LKJCholesky(dimension=dimension, concentration=1)
N = 5
corr_log_prob = []
for i in range(N):
sample = d.sample(random.PRNGKey(i))
log_prob = d.log_prob(sample)
sample_tril = matrix_to_tril_vec(sample, diagonal=-1)
cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(
_tril_cholesky_to_tril_corr)(sample_tril))[1]
corr_log_prob.append(log_prob - cholesky_to_corr_jac)
corr_log_prob = np.array(corr_log_prob)
assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],
corr_log_prob.shape), rtol=1e-06)
if dimension == 2:
assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)
@pytest.mark.parametrize('dimension', [2, 3, 5])
@pytest.mark.parametrize('concentration', [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
sample = signed_stick_breaking_tril(partial_correlation)
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(
partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky
).log_abs_det_jacobian(unconstrained, sample)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = (beta_log_prob - affine_logdet -
signed_stick_breaking_logdet)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06
)
def test_zero_inflated_logits_probs_agree():
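    # the gate and gate_logits parametrizations must give identical log-probabilities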
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(0, 20, (1000, 100))
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
<|reserved_special_token_0|>
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
probs = np.random.beta(concentration1, concentration0, size=(
num_samples,) + shape)
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count
).log_prob(value)
assert_allclose(actual, expected, rtol=0.02)
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
    event_shape = (3,)
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1
,) * len(batch_shape) + event_shape)
num_samples = 100000
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (
num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(
value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(
num_samples,) + shape)
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
        pytest.skip('we have separate tests for the LKJCholesky distribution')
if jax_dist is _ImproperWrapper:
pytest.skip(
'no param for ImproperUniform to test for log_prob gradient')
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 0.001
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
continue
if jax_dist is _SparseCAR and i == 3:
continue
if isinstance(params[i], dist.Distribution):
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,
jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,
atol=0.01)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_mean_var(jax_dist, sp_dist, params):
if jax_dist is _ImproperWrapper:
        pytest.skip('Improper distribution does not have mean/var implemented')
if jax_dist is FoldedNormal:
        pytest.skip('Folded distribution does not have mean/var implemented')
if jax_dist is dist.EulerMaruyama:
pytest.skip(
            'EulerMaruyama distribution does not have mean/var implemented')
if jax_dist is dist.RelaxedBernoulliLogits:
pytest.skip(
            'RelaxedBernoulli distribution does not have mean/var implemented')
if 'SineSkewed' in jax_dist.__name__:
        pytest.skip('Skewed distributions are not symmetric about the location.')
if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.
LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.
TwoSidedTruncatedDistribution):
        pytest.skip('Truncated distributions do not have mean/var implemented')
if jax_dist is dist.ProjectedNormal:
pytest.skip('Mean is defined in submanifold')
n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.
SineBivariateVonMises] else 200000
d_jax = jax_dist(*params)
k = random.PRNGKey(0)
samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)
if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [
dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:
d_sp = sp_dist(*params)
try:
sp_mean = d_sp.mean()
except TypeError:
sp_mean = d_sp.mean
if d_jax.event_shape:
try:
sp_var = jnp.diag(d_sp.cov())
except TypeError:
sp_var = jnp.diag(d_sp.cov)
except AttributeError:
sp_var = d_sp.var()
else:
sp_var = d_sp.var()
assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)
assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)
if jnp.all(jnp.isfinite(sp_mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,
atol=0.01)
if jnp.all(jnp.isfinite(sp_var)):
assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),
rtol=0.05, atol=0.01)
elif jax_dist in [dist.LKJ, dist.LKJCholesky]:
if jax_dist is dist.LKJCholesky:
corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))
else:
corr_samples = samples
dimension, concentration, _ = params
marginal = dist.Beta(concentration + 0.5 * (dimension - 2),
concentration + 0.5 * (dimension - 2))
marginal_mean = 2 * marginal.mean - 1
marginal_std = 2 * jnp.sqrt(marginal.variance)
expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.
shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +
d_jax.event_shape)
expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape
(marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.
event_shape)
expected_mean = expected_mean * (1 - jnp.identity(dimension)
) + jnp.identity(dimension)
expected_std = expected_std * (1 - jnp.identity(dimension))
assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol
=0.01)
assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)
elif jax_dist in [dist.VonMises]:
assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)
x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)
expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)
assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01
)
elif jax_dist in [dist.SineBivariateVonMises]:
phi_loc = _circ_mean(samples[..., 0])
psi_loc = _circ_mean(samples[..., 1])
assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),
rtol=0.05, atol=0.01)
elif jax_dist in [dist.MatrixNormal]:
        sample_shape = (200000,)
if len(d_jax.batch_shape) > 0:
axes = [(len(sample_shape) + i) for i in range(len(d_jax.
batch_shape))]
axes = tuple(axes)
samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))
subshape = samples_re.shape[:len(axes)]
ixi = product(*[range(k) for k in subshape])
for ix in ixi:
def get_min_shape(ix, batch_shape):
return min(ix, tuple(map(lambda x: x - 1, batch_shape)))
ix_loc = get_min_shape(ix, d_jax.loc.shape[:len(ix)])
jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax
.mean[ix_loc]), rtol=0.5, atol=0.01)
samples_mvn = jnp.squeeze(samples_re[ix]).reshape(
sample_shape + (-1,), order='F')
ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:
len(ix)])
ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]
)
scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax
.scale_tril_row[ix_row])
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01
)
else:
jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),
rtol=0.5, atol=0.01)
samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),
order='F')
scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.
squeeze(d_jax.scale_tril_row))
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)
else:
if jnp.all(jnp.isfinite(d_jax.mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,
atol=0.01)
if isinstance(d_jax, dist.CAR):
pytest.skip(
'CAR distribution does not have `variance` implemented.')
if isinstance(d_jax, dist.Gompertz):
pytest.skip(
'Gompertz distribution does not have `variance` implemented.')
if jnp.all(jnp.isfinite(d_jax.variance)):
assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),
rtol=0.05, atol=0.01)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,
_Gaussian2DMixture, _GeneralMixture, _General2DMixture):
pytest.skip(f'{jax_dist.__name__} is a function, not a class')
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky
) and dist_args[i] != 'concentration':
continue
if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':
continue
if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i
] == 'base_dist':
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':
continue
if jax_dist is dist.SineBivariateVonMises and dist_args[i
] == 'weighted_correlation':
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(
params[i]), key_gen)
valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(
params[i]), key_gen)
if jax_dist is dist.MultivariateStudentT:
valid_params[0] += 1
if jax_dist is dist.LogUniform:
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
if not dependent_constraint and (jax_dist is not _ImproperWrapper and
'SineSkewed' not in jax_dist.__name__):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and
prepend_shape):
valid_samples = gen_values_within_bounds(d.support, size=
prepend_shape + d.batch_shape + d.event_shape)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,
rtol=1e-05)
oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +
d.batch_shape + d.event_shape)
with pytest.warns(UserWarning, match='Out-of-support'):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match='Out-of-support'):
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match='got invalid'):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match='Out-of-support'):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(
data).sum()
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data
).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-06)
assert_allclose(grad_fx, grad_gx, atol=0.0001)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(ValueError,
match='^BetaProportion distribution got invalid mean parameter\\.$'):
dist.BetaProportion(1.0, 1.0)
@pytest.mark.parametrize('constraint, x, expected', [(constraints.boolean,
np.array([True, False]), np.array([True, True])), (constraints.boolean,
np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.
array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False
])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [
0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.
array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),
(constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5,
0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3,
True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([
False, False, True])), (constraints.integer_interval(-3, 5), 0, True),
(constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),
np.array([False, True, True, False, True, False])), (constraints.
interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5,
-3, 0, 5, 7]), np.array([False, True, True, True, False])), (
constraints.less_than(1), -2, True), (constraints.less_than(1), np.
array([-1, 1, 5]), np.array([True, False, False])), (constraints.
lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (
constraints.lower_cholesky, np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0,
0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.
nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.
array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.
positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.
array([False, False, True])), (constraints.positive_definite, np.array(
[[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.
array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([
False, False])), (constraints.positive_integer, 3, True), (constraints.
positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False,
True])), (constraints.real, -1, True), (constraints.real, np.array([np.
inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),
(constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.
simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),
np.array([True, False, False])), (constraints.softplus_positive, 3,
True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([
False, False, True])), (constraints.softplus_lower_cholesky, np.array([
[1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.
array([False, False])), (constraints.unit_interval, 0.1, True), (
constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([
False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,
0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.
open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,
False, True, False, False]))])
def test_constraints(constraint, x, expected):
v = constraint.feasible_like(x)
if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':
assert not constraint.is_discrete
assert_array_equal(constraint(x), expected)
feasible_value = constraint.feasible_like(x)
assert jnp.shape(feasible_value) == jnp.shape(x)
assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected
), True))
try:
inverse = biject_to(constraint).inv(feasible_value)
except NotImplementedError:
pass
else:
assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)
@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,
constraints.corr_matrix, constraints.greater_than(2), constraints.
interval(-3, 5), constraints.l1_ball, constraints.less_than(1),
constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector, constraints.positive, constraints.
positive_definite, constraints.positive_ordered_vector, constraints.
real, constraints.real_vector, constraints.simplex, constraints.
softplus_positive, constraints.softplus_lower_cholesky, constraints.
unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.
__class__)
@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,
3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=
jnp.bool_))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-05, rtol=1e-05)
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
if len(shape) == event_dim:
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)
[:, :-1])[1]
elif constraint in [constraints.real_vector, constraints.
ordered_vector, constraints.positive_ordered_vector,
constraints.l1_ball]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x),
diagonal=-1)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
matrix = matrix + jnp.swapaxes(matrix, -2, -1
) + jnp.identity(matrix.shape[-1])
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
elif constraint in [constraints.lower_cholesky, constraints.
scaled_unit_lower_cholesky, constraints.positive_definite,
constraints.softplus_lower_cholesky]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x))
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(
jnp.diag(matrix))
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)
assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.
array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (
SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np
.array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(
[biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv]), (5,))])
@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1,
3), (5, 3)])
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-06, rtol=0.0001)
assert transform.inv.inv is transform
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-06)
assert_allclose(actual, -inv_expected, atol=1e-06)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_composed_transform(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t1])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 2
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2
) + jnp.log(2) * 9
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z
) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
@pytest.mark.parametrize('prepend_event_shape', [(), (4,)])
@pytest.mark.parametrize('sample_shape', [(), (7,)])
def test_transformed_distribution(batch_shape, prepend_event_shape,
sample_shape):
base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +
(6,)).to_event(1 + len(prepend_event_shape))
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
d = dist.TransformedDistribution(base_dist, [t1, t2, t1])
assert d.event_dim == 2 + len(prepend_event_shape)
y = d.sample(random.PRNGKey(0), sample_shape)
t = transforms.ComposeTransform([t1, t2, t1])
x = t.inv(y)
assert x.shape == sample_shape + base_dist.shape()
log_prob = d.log_prob(y)
assert log_prob.shape == sample_shape + batch_shape
t_log_det = t.log_abs_det_jacobian(x, y)
if prepend_event_shape:
t_log_det = t_log_det.sum(-1)
expected_log_prob = base_dist.log_prob(x) - t_log_det
assert_allclose(log_prob, expected_log_prob, atol=1e-05)
@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),
dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms
.PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(random
.PRNGKey(1))
assert_allclose(transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample))
def test_transformed_transformed_distribution():
loc, scale = -2, 3
dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.
PowerTransform(2.0))
dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(
-2, 3))
assert isinstance(dist2.base_dist, dist.Normal)
assert len(dist2.transforms) == 2
assert isinstance(dist2.transforms[0], transforms.PowerTransform)
assert isinstance(dist2.transforms[1], transforms.AffineTransform)
rng_key = random.PRNGKey(0)
assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))
intermediates = dist2.sample_with_intermediates(rng_key)
assert len(intermediates) == 2
@pytest.mark.parametrize('ts', [[transforms.PowerTransform(0.7), transforms
.AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.
ComposeTransform([transforms.AffineTransform(-2, 3), transforms.
ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,
hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.
PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],
rng_key=random.PRNGKey(1))]])
def test_compose_transform_with_intermediates(ts):
transform = transforms.ComposeTransform(ts)
x = random.normal(random.PRNGKey(2), (7, 5))
y, intermediates = transform.call_with_intermediates(x)
logdet = transform.log_abs_det_jacobian(x, y, intermediates)
assert_allclose(y, transform(x))
assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))
@pytest.mark.parametrize('x_dim, y_dim', [(3, 3), (3, 4)])
def test_unpack_transform(x_dim, y_dim):
xy = np.random.randn(x_dim + y_dim)
unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}
transform = transforms.UnpackTransform(unpack_fn)
z = transform(xy)
if x_dim == y_dim:
with pytest.warns(UserWarning, match='UnpackTransform.inv'):
t = transform.inv(z)
else:
t = transform.inv(z)
assert_allclose(t, xy)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=
100000, key=random.PRNGKey(11)):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
            '{} sampling method taken from upstream, no need to test generated samples.'
.format(jax_dist.__name__))
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
@pytest.mark.parametrize('jax_dist, params, support', [(dist.
BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),
jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist
.BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0,
0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,
5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5,
0.4]),), jnp.arange(3))])
@pytest.mark.parametrize('batch_shape', [(5,), ()])
@pytest.mark.parametrize('expand', [False, True])
def test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand
):
p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))
actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)
expected = support.reshape((-1,) + (1,) * len(batch_shape))
if expand:
expected = jnp.broadcast_to(expected, support.shape + batch_shape)
assert_allclose(actual, expected)
def test_zero_inflated_enumerate_support():
base_dist = dist.Bernoulli(0.5)
d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)
assert d.has_enumerate_support
assert_allclose(d.enumerate_support(), base_dist.enumerate_support())
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)
@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])
@pytest.mark.parametrize('sample_shape', [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples
).shape == sample_shape + new_batch_shape
assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,
) + new_batch_shape
if prepend_shape:
with pytest.raises(ValueError, match=
'Cannot broadcast distribution of shape'):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
@pytest.mark.parametrize('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,
5)])
@pytest.mark.parametrize('event_dim', [0, 1, 2, 3])
@pytest.mark.parametrize('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])
def test_expand_shuffle_regression(base_shape, event_dim, sample_shape):
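    # Regression check: expanding a distribution must broadcast without shuffling
    # values, so the sample mean should match the locs broadcast to the expanded shape.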
expand_shape = 2, 3, 5
event_dim = min(event_dim, len(base_shape))
loc = random.normal(random.PRNGKey(0), base_shape) * 10
base_dist = dist.Normal(loc, 0.1).to_event(event_dim)
expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -
event_dim])
samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)
expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.
shape())
assert_allclose(samples.mean(0), expected_mean, atol=0.1)
def test_sine_bivariate_von_mises_sample_mean():
loc = jnp.array([[2.0, -1.0], [-2, 1.0]])
sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)
samples = sine.sample(random.PRNGKey(0), (5000,))
assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)
@pytest.mark.parametrize('batch_shape', [(), (4,)])
def test_polya_gamma(batch_shape, num_points=20000):
d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)
rng_key = random.PRNGKey(0)
x = jnp.linspace(1e-06, d.truncation_point, num_points)
prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x
), axis=-1))
assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)
z = d.sample(rng_key, sample_shape=(3000,))
mean = jnp.mean(z, axis=-1)
assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)
@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)
), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
full_shape = 4, 1, 1, 1, 6
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
@pytest.mark.parametrize('batch_shape, mask_shape', [((), ()), ((2,), ()),
((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])
@pytest.mark.parametrize('event_shape', [(), (3,)])
def test_mask(batch_shape, event_shape, mask_shape):
jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len
(event_shape))
mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)
if mask_shape == ():
mask = bool(mask)
samples = jax_dist.sample(random.PRNGKey(1))
actual = jax_dist.mask(mask).log_prob(samples)
assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.
broadcast_shapes(batch_shape, mask_shape)))
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(
event_shape))))
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)
]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_dist_pytree(jax_dist, sp_dist, params):
def f(x):
return jax_dist(*params)
if jax_dist is _ImproperWrapper:
        pytest.skip('Cannot flatten ImproperUniform')
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama doesn't define flatten/unflatten")
jax.jit(f)(0)
lax.map(f, np.ones(3))
expected_dist = f(0)
actual_dist = jax.jit(f)(0)
expected_sample = expected_dist.sample(random.PRNGKey(0))
actual_sample = actual_dist.sample(random.PRNGKey(0))
expected_log_prob = expected_dist.log_prob(expected_sample)
actual_log_prob = actual_dist.log_prob(actual_sample)
assert_allclose(actual_sample, expected_sample, rtol=1e-06)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)
def test_expand_no_unnecessary_batch_shape_expansion():
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
def bs(arg):
return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
d = bs(arg)
dj = jax.jit(bs)(arg)
assert isinstance(d, dist.ExpandedDistribution)
assert isinstance(dj, dist.ExpandedDistribution)
assert d.batch_shape == dj.batch_shape
assert d.base_dist.batch_shape == dj.base_dist.batch_shape
assert d.base_dist.event_shape == dj.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(
shape)
q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(
shape)
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.
Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.
Normal), (dist.Weibull, dist.Gamma)])
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f'Missing pattern for param {k}.')
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
def _allclose_or_equal(a1, a2):
if isinstance(a1, np.ndarray):
return np.allclose(a2, a1)
elif isinstance(a1, jnp.ndarray):
return jnp.allclose(a2, a1)
elif isinstance(a1, csr_matrix):
return np.allclose(a2.todense(), a1.todense())
else:
return a2 == a1 or a2 is a1
def _tree_equal(t1, t2):
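    # Leaf-wise allclose/equality over two pytrees, reduced to a single boolean.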
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[:len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
return
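    # Each case pairs in_axes for the constructor args with out_axes for the
    # resulting distribution: all vmappable params batched on axis 0, each param
    # batched alone on axis 0 (with and without structured out_axes from
    # vmap_over), and axis-1 batching for array-valued params.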
in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for
i in range(len(params))), 0), *(([(0 if i == idx else None) for i in
range(len(params))], 0) for idx in vmappable_param_idxs if params[
idx] is not None), *(([(0 if i == idx else None) for i in range(len
(params))], vmap_over(d, **{param_names[idx]: 0})) for idx in
vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==
idx else None) for i in range(len(params))], vmap_over(d, **{
param_names[idx]: 1})) for idx in vmappable_param_idxs if
isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).
ndim > 0 and jax_dist is not _GeneralMixture)]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),
arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,
in_axes)]
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes
)(*batched_params)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10
).sample(key)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-06)
def test_normal_log_cdf():
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)
@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], [-
5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]
)
def test_truncated_normal_log_prob_in_tail(value):
loc = 1.35
scale = jnp.geomspace(0.01, 1, 10)
low, high = -20, -1.0
a, b = (low - loc) / scale, (high - loc) / scale
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high
).log_prob(value)
jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
def my_kron(A, B):
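    # Batched Kronecker product over the two trailing dimensions:
    # (..., m, n) kron (..., p, q) -> (..., m * p, n * q).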
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]
return D.reshape(newshape)
def _identity(x):
return x
def _circ_mean(angles):
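    # Circular mean along axis 0: the angle of the average unit vector.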
return jnp.arctan2(jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(
angles), axis=0))
def sde_fn1(x, _):
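    # Toy SDE: linear drift lam * x with constant diffusion sigma2.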
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
class T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
def _mvn_to_scipy(loc, cov, prec, tril):
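    # Build the scipy multivariate normal from whichever parameterization
    # (cov / prec / tril) was supplied, going through the numpyro distribution
    # to recover the mean and covariance matrix.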
jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _multivariate_t_to_scipy(df, loc, tril):
if scipy.__version__ < '1.6.0':
pytest.skip(
'Multivariate Student-T distribution is not available in scipy < 1.6'
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,
math.pi])
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0]), np.array([1.0])
        base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])
        base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(self:
SineSkewedVonMisesBatched, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,
skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(self.component_distribution, loc=loc,
scale=scale)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix
=covariance_matrix)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(mixing_distribution=mixing_distribution,
component_distribution=component_dist)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(self,
_component_distribution=component_distribution)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,
scale_ in zip(locs, scales)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(mixing_distribution=mixing_distribution,
component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in
self.component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [dist.MultivariateNormal(loc=loc_,
covariance_matrix=covariance_matrix) for loc_,
covariance_matrix in zip(locs, covariance_matrices)]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(mixing_distribution=mixing_distribution,
component_distributions=component_dists)
@property
def locs(self):
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [vmap_over(d, loc=locs) for d in self.
component_distributions]
return vmap_over.dispatch(dist.MixtureGeneral)(self,
_component_distributions=component_distributions)
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
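        # Sample by drawing uniformly in the unconstrained space and pushing the
        # values through biject_to(self.support).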
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {'rate': constraints.positive, 'gate_logits':
constraints.real}
pytree_data_fields = 'rate',
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=
validate_args)
@vmap_over.register
def _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,
rate=None, gate_logits=None):
dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,
base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=
gate_logits, gate=gate_logits)
dist_axes.rate = rate
return dist_axes
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=
vmap_over(self.base_dist, loc=loc, scale=scale))
d.loc = loc
d.scale = scale
return d
class _SparseCAR(dist.CAR):
reparametrized_params = ['loc', 'correlation', 'conditional_precision']
def __init__(self, loc, correlation, conditional_precision, adj_matrix,
*, is_sparse=True, validate_args=None):
super().__init__(loc, correlation, conditional_precision,
adj_matrix, is_sparse=True, validate_args=validate_args)
def get_sp_dist(jax_dist):
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
def _is_batched_multivariate(jax_dist):
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
def gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):
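    # Draw values that are guaranteed to satisfy `constraint`, used to construct
    # valid parameters and in-support samples for the tests below.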
eps = 1e-06
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size)
elif isinstance(constraint, constraints.greater_than):
return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.randint(key, size, lower_bound, upper_bound + 1)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound + random.poisson(key, np.array(5),
shape=size)
elif isinstance(constraint, constraints.interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=lower_bound, maxval=upper_bound
)
elif constraint in (constraints.real, constraints.real_vector):
return random.normal(key, size)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.
upper_bound, shape=size[:-1])
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (
size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))
elif constraint is constraints.corr_matrix:
cholesky = signed_stick_breaking_tril(random.uniform(key, size[:-2] +
(size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return jnp.tril(random.uniform(key, size))
elif constraint is constraints.positive_definite:
x = random.normal(key, size)
return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x - random.normal(key, size[:-1] + (1,))
elif isinstance(constraint, constraints.independent):
return gen_values_within_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
return x / jnp.linalg.norm(x, axis=-1)
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [0, (-1) ** sign * 0.5]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError('{} not implemented.'.format(constraint))
def gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):
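    # Draw values that are guaranteed to violate `constraint`, used to exercise
    # validation errors and out-of-support warnings.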
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size) - 2
elif isinstance(constraint, constraints.greater_than):
return constraint.lower_bound - jnp.exp(random.normal(key, size))
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
return random.randint(key, size, lower_bound - 1, lower_bound)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound - random.poisson(key, np.array(5),
shape=size)
elif isinstance(constraint, constraints.interval):
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=upper_bound, maxval=
upper_bound + 1.0)
elif constraint in [constraints.real, constraints.real_vector]:
return lax.full(size, np.nan)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]
) + 0.01
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.
upper_bound, shape=size[:-1]) + 1
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (
size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01
elif constraint is constraints.corr_matrix:
cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key,
size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return random.uniform(key, size)
elif constraint is constraints.positive_definite:
return random.normal(key, size)
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x[..., ::-1]
elif isinstance(constraint, constraints.independent):
return gen_values_outside_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)
return 2 * x
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError('{} not implemented.'.format(constraint))
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.
event_shape)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(
jax_dist, dist.MultivariateStudentT):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
size = 1 if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip(
"scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.
MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.
covariance_matrix), rtol=1e-06)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, 'shape', ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist
).infer_shapes(*shapes)
except NotImplementedError:
pytest.skip(
f'{type(jax_dist).__name__}.infer_shapes() is not implemented')
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(
constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.
reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
@pytest.mark.parametrize('batch_shape', [(), (4,), (3, 2)])
def test_unit(batch_shape):
log_factor = random.normal(random.PRNGKey(0), batch_shape)
d = dist.Unit(log_factor=log_factor)
x = d.sample(random.PRNGKey(1))
assert x.shape == batch_shape + (0,)
assert (d.log_prob(x) == log_factor).all()
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [
'concentration1', 'concentration0'], 'BetaProportion': ['mean',
'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],
'InverseGamma': ['concentration'], 'LKJ': ['concentration'],
'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.
__name__, [])
dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1
:] if inspect.isclass(jax_dist) else inspect.getfullargspec(
jax_dist)[0])]
params_dict = dict(zip(dist_args[:len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [p for p in jax_class.reparametrized_params if
p not in gamma_derived_params]
if not reparametrized_params:
pytest.skip('{} not reparametrized.'.format(jax_class.__name__))
nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in
reparametrized_params}
repara_params = tuple(v for k, v in params_dict.items() if k in
reparametrized_params)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).
sample(key=rng_key))
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 0.001
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(
repara_params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(
repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,
atol=0.03)
@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.
Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,
(0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,
(5.0, 2.0, 4.0))])
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z ** 2).mean(0)
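    # Closed form of E[z + z**2] = mean + variance + mean**2; grad(g) is the exact
    # gradient that the Monte Carlo pathwise estimate grad(f) should match.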
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean ** 2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
@pytest.mark.parametrize('jit', [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples
).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (dist.LeftTruncatedDistribution, dist.
RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
loc, scale, low, high = params[0].loc, params[0].scale, params[
1], params[2]
else:
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -
sp_dist.cdf(low))
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,
atol=1e-05)
return
pytest.skip('no corresponding scipy distn.')
if _is_batched_multivariate(jax_dist):
pytest.skip('batching not allowed in multivariate distns.')
if jax_dist.event_shape and prepend_shape:
pytest.skip(
'batched samples cannot be scored by multivariate distributions.')
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
if "The input vector 'x' must lie within the normal simplex." in str(e
):
samples = jax.device_get(samples).astype('float64')
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)
def test_mixture_log_prob():
gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist
.Normal(0, 1).expand([2]))
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.
Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])
@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip(
'skip testing cdf/icdf methods of multivariate distributions')
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05
if d.shape() == () and not d.is_discrete:
assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.
log_prob(samples)), atol=1e-05, rtol=rtol)
assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(
-d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,
rtol=1e-05)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)
except NotImplementedError:
pass
if not sp_dist:
pytest.skip('no corresponding scipy distn.')
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)
except NotImplementedError:
pass
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)
def test_gof(jax_dist, sp_dist, params):
if 'Improper' in jax_dist.__name__:
pytest.skip('distribution has improper .log_prob()')
if 'LKJ' in jax_dist.__name__:
pytest.xfail('incorrect submanifold scaling')
if jax_dist is dist.EulerMaruyama:
d = jax_dist(*params)
if d.event_dim > 1:
pytest.skip(
                'Skip EulerMaruyama test when the event shape is non-trivial.')
num_samples = 10000
if 'BetaProportion' in jax_dist.__name__:
num_samples = 20000
rng_key = random.PRNGKey(0)
d = jax_dist(*params)
samples = d.sample(key=rng_key, sample_shape=(num_samples,))
probs = np.exp(d.log_prob(samples))
dim = None
if jax_dist is dist.ProjectedNormal:
dim = samples.shape[-1] - 1
probs = probs.reshape(num_samples, -1)
samples = samples.reshape(probs.shape + d.event_shape)
if 'Dirichlet' in jax_dist.__name__:
samples = samples[..., :-1]
for b in range(probs.shape[1]):
try:
gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
except InvalidTest:
pytest.skip('expensive test')
else:
assert gof > TEST_FAILURE_RATE
def _tril_cholesky_to_tril_corr(x):
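    # Map the strictly lower-triangular part of a correlation Cholesky factor to
    # the strictly lower-triangular part of the corresponding correlation matrix,
    # so the Jacobian of this change of variables can be computed below.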
w = vec_to_tril_matrix(x, diagonal=-1)
diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))
cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])
corr = jnp.matmul(cholesky, cholesky.T)
return matrix_to_tril_vec(corr, diagonal=-1)
@pytest.mark.parametrize('dimension', [2, 3, 5])
def test_log_prob_LKJCholesky_uniform(dimension):
d = dist.LKJCholesky(dimension=dimension, concentration=1)
N = 5
corr_log_prob = []
for i in range(N):
sample = d.sample(random.PRNGKey(i))
log_prob = d.log_prob(sample)
sample_tril = matrix_to_tril_vec(sample, diagonal=-1)
cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(
_tril_cholesky_to_tril_corr)(sample_tril))[1]
corr_log_prob.append(log_prob - cholesky_to_corr_jac)
corr_log_prob = np.array(corr_log_prob)
assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],
corr_log_prob.shape), rtol=1e-06)
if dimension == 2:
assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)
@pytest.mark.parametrize('dimension', [2, 3, 5])
@pytest.mark.parametrize('concentration', [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
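    # Check LKJCholesky.log_prob by change of variables: sample partial
    # correlations via the underlying Beta, push them through signed stick
    # breaking, and subtract the affine and stick-breaking Jacobian terms.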
d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
sample = signed_stick_breaking_tril(partial_correlation)
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(
partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky
).log_abs_det_jacobian(unconstrained, sample)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = (beta_log_prob - affine_logdet -
signed_stick_breaking_logdet)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06
)
def test_zero_inflated_logits_probs_agree():
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(0, 20, (1000, 100))
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
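    # Compare the analytic BetaBinomial log_prob with a Monte Carlo estimate that
    # marginalizes Binomial(total_count, p) over p ~ Beta(c1, c0) via logsumexp.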
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
probs = np.random.beta(concentration1, concentration0, size=(
num_samples,) + shape)
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count
).log_prob(value)
assert_allclose(actual, expected, rtol=0.02)
@pytest.mark.parametrize('total_count', [1, 2, 3, 10])
@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
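    # Same Monte Carlo marginalization check as above, for DirichletMultinomial.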
event_shape = 3,
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1
,) * len(batch_shape) + event_shape)
num_samples = 100000
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (
num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(
value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
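    # Monte Carlo marginalization: averaging Poisson(rate) over
    # rate ~ Gamma(gamma_conc, gamma_rate) recovers the GammaPoisson pmf.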
poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(
num_samples,) + shape)
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
pytest.skip('we have separated tests for LKJCholesky distribution')
if jax_dist is _ImproperWrapper:
pytest.skip(
'no param for ImproperUniform to test for log_prob gradient')
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 0.001
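    # Compare autodiff gradients against central finite differences of the
    # summed log-probability with respect to each continuous parameter.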
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
continue
if jax_dist is _SparseCAR and i == 3:
continue
if isinstance(params[i], dist.Distribution):
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,
jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]
args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,
atol=0.01)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_mean_var(jax_dist, sp_dist, params):
if jax_dist is _ImproperWrapper:
        pytest.skip('Improper distribution does not have mean/var implemented')
if jax_dist is FoldedNormal:
        pytest.skip('Folded distribution does not have mean/var implemented')
if jax_dist is dist.EulerMaruyama:
pytest.skip(
            'EulerMaruyama distribution does not have mean/var implemented')
if jax_dist is dist.RelaxedBernoulliLogits:
pytest.skip(
            'RelaxedBernoulli distribution does not have mean/var implemented')
if 'SineSkewed' in jax_dist.__name__:
        pytest.skip('Skewed distributions are not symmetric about the location.')
if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.
LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.
TwoSidedTruncatedDistribution):
        pytest.skip('Truncated distributions do not have mean/var implemented')
if jax_dist is dist.ProjectedNormal:
pytest.skip('Mean is defined in submanifold')
n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.
SineBivariateVonMises] else 200000
d_jax = jax_dist(*params)
k = random.PRNGKey(0)
samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)
if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [
dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:
d_sp = sp_dist(*params)
try:
sp_mean = d_sp.mean()
except TypeError:
sp_mean = d_sp.mean
if d_jax.event_shape:
try:
sp_var = jnp.diag(d_sp.cov())
except TypeError:
sp_var = jnp.diag(d_sp.cov)
except AttributeError:
sp_var = d_sp.var()
else:
sp_var = d_sp.var()
assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)
assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)
if jnp.all(jnp.isfinite(sp_mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,
atol=0.01)
if jnp.all(jnp.isfinite(sp_var)):
assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),
rtol=0.05, atol=0.01)
elif jax_dist in [dist.LKJ, dist.LKJCholesky]:
if jax_dist is dist.LKJCholesky:
corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))
else:
corr_samples = samples
dimension, concentration, _ = params
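        # Off-diagonal entries of an LKJ correlation matrix are marginally
        # distributed as 2 * Beta(c + (d - 2) / 2, c + (d - 2) / 2) - 1.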
marginal = dist.Beta(concentration + 0.5 * (dimension - 2),
concentration + 0.5 * (dimension - 2))
marginal_mean = 2 * marginal.mean - 1
marginal_std = 2 * jnp.sqrt(marginal.variance)
expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.
shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +
d_jax.event_shape)
expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape
(marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.
event_shape)
expected_mean = expected_mean * (1 - jnp.identity(dimension)
) + jnp.identity(dimension)
expected_std = expected_std * (1 - jnp.identity(dimension))
assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol
=0.01)
assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)
elif jax_dist in [dist.VonMises]:
assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)
x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)
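        # Circular variance: 1 minus the mean resultant length sqrt(x^2 + y^2).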
expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)
        assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01)
elif jax_dist in [dist.SineBivariateVonMises]:
phi_loc = _circ_mean(samples[..., 0])
psi_loc = _circ_mean(samples[..., 1])
assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),
rtol=0.05, atol=0.01)
elif jax_dist in [dist.MatrixNormal]:
sample_shape = 200000,
if len(d_jax.batch_shape) > 0:
axes = [(len(sample_shape) + i) for i in range(len(d_jax.
batch_shape))]
axes = tuple(axes)
samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))
subshape = samples_re.shape[:len(axes)]
ixi = product(*[range(k) for k in subshape])
for ix in ixi:
def get_min_shape(ix, batch_shape):
return min(ix, tuple(map(lambda x: x - 1, batch_shape)))
ix_loc = get_min_shape(ix, d_jax.loc.shape[:len(ix)])
jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax
.mean[ix_loc]), rtol=0.5, atol=0.01)
samples_mvn = jnp.squeeze(samples_re[ix]).reshape(
sample_shape + (-1,), order='F')
ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:
len(ix)])
ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]
)
scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax
.scale_tril_row[ix_row])
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01
)
else:
jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),
rtol=0.5, atol=0.01)
samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),
order='F')
scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.
squeeze(d_jax.scale_tril_row))
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)
else:
if jnp.all(jnp.isfinite(d_jax.mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,
atol=0.01)
if isinstance(d_jax, dist.CAR):
pytest.skip(
'CAR distribution does not have `variance` implemented.')
if isinstance(d_jax, dist.Gompertz):
pytest.skip(
'Gompertz distribution does not have `variance` implemented.')
if jnp.all(jnp.isfinite(d_jax.variance)):
assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),
rtol=0.05, atol=0.01)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,
_Gaussian2DMixture, _GeneralMixture, _General2DMixture):
pytest.skip(f'{jax_dist.__name__} is a function, not a class')
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky
) and dist_args[i] != 'concentration':
continue
if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':
continue
if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i
] == 'base_dist':
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':
continue
if jax_dist is dist.SineBivariateVonMises and dist_args[i
] == 'weighted_correlation':
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(
params[i]), key_gen)
valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(
params[i]), key_gen)
if jax_dist is dist.MultivariateStudentT:
valid_params[0] += 1
if jax_dist is dist.LogUniform:
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
if not dependent_constraint and (jax_dist is not _ImproperWrapper and
'SineSkewed' not in jax_dist.__name__):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and
prepend_shape):
valid_samples = gen_values_within_bounds(d.support, size=
prepend_shape + d.batch_shape + d.event_shape)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,
rtol=1e-05)
oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +
d.batch_shape + d.event_shape)
with pytest.warns(UserWarning, match='Out-of-support'):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match='Out-of-support'):
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match='got invalid'):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match='Out-of-support'):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(
data).sum()
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data
).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-06)
assert_allclose(grad_fx, grad_gx, atol=0.0001)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(ValueError,
match='^BetaProportion distribution got invalid mean parameter\\.$'):
dist.BetaProportion(1.0, 1.0)
@pytest.mark.parametrize('constraint, x, expected', [(constraints.boolean,
np.array([True, False]), np.array([True, True])), (constraints.boolean,
np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.
array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False
])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [
0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.
array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),
(constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5,
0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3,
True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([
False, False, True])), (constraints.integer_interval(-3, 5), 0, True),
(constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),
np.array([False, True, True, False, True, False])), (constraints.
interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5,
-3, 0, 5, 7]), np.array([False, True, True, True, False])), (
constraints.less_than(1), -2, True), (constraints.less_than(1), np.
array([-1, 1, 5]), np.array([True, False, False])), (constraints.
lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (
constraints.lower_cholesky, np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0,
0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.
nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.
array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.
positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.
array([False, False, True])), (constraints.positive_definite, np.array(
[[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.
array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([
False, False])), (constraints.positive_integer, 3, True), (constraints.
positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False,
True])), (constraints.real, -1, True), (constraints.real, np.array([np.
inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),
(constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.
simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),
np.array([True, False, False])), (constraints.softplus_positive, 3,
True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([
False, False, True])), (constraints.softplus_lower_cholesky, np.array([
[1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.
array([False, False])), (constraints.unit_interval, 0.1, True), (
constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([
False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,
0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.
open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,
False, True, False, False]))])
def test_constraints(constraint, x, expected):
v = constraint.feasible_like(x)
if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':
assert not constraint.is_discrete
assert_array_equal(constraint(x), expected)
feasible_value = constraint.feasible_like(x)
assert jnp.shape(feasible_value) == jnp.shape(x)
assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected
), True))
try:
inverse = biject_to(constraint).inv(feasible_value)
except NotImplementedError:
pass
else:
assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)
@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,
constraints.corr_matrix, constraints.greater_than(2), constraints.
interval(-3, 5), constraints.l1_ball, constraints.less_than(1),
constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector, constraints.positive, constraints.
positive_definite, constraints.positive_ordered_vector, constraints.
real, constraints.real_vector, constraints.simplex, constraints.
softplus_positive, constraints.softplus_lower_cholesky, constraints.
unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.
__class__)
@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,
3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=
jnp.bool_))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-05, rtol=1e-05)
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
if len(shape) == event_dim:
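        # Cross-check log|det J| against an explicit Jacobian via slogdet
        # (or grad for scalar transforms), vectorizing matrix-valued outputs.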
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)
[:, :-1])[1]
elif constraint in [constraints.real_vector, constraints.
ordered_vector, constraints.positive_ordered_vector,
constraints.l1_ball]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x),
diagonal=-1)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
matrix = matrix + jnp.swapaxes(matrix, -2, -1
) + jnp.identity(matrix.shape[-1])
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
elif constraint in [constraints.lower_cholesky, constraints.
scaled_unit_lower_cholesky, constraints.positive_definite,
constraints.softplus_lower_cholesky]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x))
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(
jnp.diag(matrix))
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform
)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)
assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)
@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.
array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (
SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np
.array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(
[biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv]), (5,))])
@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1,
3), (5, 3)])
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
z = transform.inv(y)
assert_allclose(x, z, atol=1e-06, rtol=0.0001)
assert transform.inv.inv is transform
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-06)
assert_allclose(actual, -inv_expected, atol=1e-06)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_composed_transform(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t1])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 2
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
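    # Expected terms: 6 * log(2) for the first affine on the 6 inputs, the
    # Cholesky transform's Jacobian, and 9 * log(2) for the second affine
    # applied to the 3x3 lower-triangular output.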
expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2
) + jnp.log(2) * 9
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z
) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
@pytest.mark.parametrize('batch_shape', [(), (5,)])
@pytest.mark.parametrize('prepend_event_shape', [(), (4,)])
@pytest.mark.parametrize('sample_shape', [(), (7,)])
def test_transformed_distribution(batch_shape, prepend_event_shape,
sample_shape):
base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +
(6,)).to_event(1 + len(prepend_event_shape))
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
d = dist.TransformedDistribution(base_dist, [t1, t2, t1])
assert d.event_dim == 2 + len(prepend_event_shape)
y = d.sample(random.PRNGKey(0), sample_shape)
t = transforms.ComposeTransform([t1, t2, t1])
x = t.inv(y)
assert x.shape == sample_shape + base_dist.shape()
log_prob = d.log_prob(y)
assert log_prob.shape == sample_shape + batch_shape
t_log_det = t.log_abs_det_jacobian(x, y)
if prepend_event_shape:
t_log_det = t_log_det.sum(-1)
expected_log_prob = base_dist.log_prob(x) - t_log_det
assert_allclose(log_prob, expected_log_prob, atol=1e-05)
@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),
dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms
.PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(random
.PRNGKey(1))
assert_allclose(transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample))
def test_transformed_transformed_distribution():
loc, scale = -2, 3
dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.
PowerTransform(2.0))
dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(
-2, 3))
assert isinstance(dist2.base_dist, dist.Normal)
assert len(dist2.transforms) == 2
assert isinstance(dist2.transforms[0], transforms.PowerTransform)
assert isinstance(dist2.transforms[1], transforms.AffineTransform)
rng_key = random.PRNGKey(0)
assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))
intermediates = dist2.sample_with_intermediates(rng_key)
assert len(intermediates) == 2
def _make_iaf(input_dim, hidden_dims, rng_key):
arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])
_, init_params = arn_init(rng_key, (input_dim,))
return InverseAutoregressiveTransform(partial(arn, init_params))
@pytest.mark.parametrize('ts', [[transforms.PowerTransform(0.7), transforms
.AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.
ComposeTransform([transforms.AffineTransform(-2, 3), transforms.
ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,
hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.
PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],
rng_key=random.PRNGKey(1))]])
def test_compose_transform_with_intermediates(ts):
transform = transforms.ComposeTransform(ts)
x = random.normal(random.PRNGKey(2), (7, 5))
y, intermediates = transform.call_with_intermediates(x)
logdet = transform.log_abs_det_jacobian(x, y, intermediates)
assert_allclose(y, transform(x))
assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))
@pytest.mark.parametrize('x_dim, y_dim', [(3, 3), (3, 4)])
def test_unpack_transform(x_dim, y_dim):
xy = np.random.randn(x_dim + y_dim)
unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}
transform = transforms.UnpackTransform(unpack_fn)
z = transform(xy)
if x_dim == y_dim:
with pytest.warns(UserWarning, match='UnpackTransform.inv'):
t = transform.inv(z)
else:
t = transform.inv(z)
assert_allclose(t, xy)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)
def test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=
100000, key=random.PRNGKey(11)):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
            '{} sampling method taken from upstream, no need to test generated samples.'
.format(jax_dist.__name__))
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
@pytest.mark.parametrize('jax_dist, params, support', [(dist.
BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),
jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist
.BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0,
0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,
5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5,
0.4]),), jnp.arange(3))])
@pytest.mark.parametrize('batch_shape', [(5,), ()])
@pytest.mark.parametrize('expand', [False, True])
def test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):
p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))
actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)
expected = support.reshape((-1,) + (1,) * len(batch_shape))
if expand:
expected = jnp.broadcast_to(expected, support.shape + batch_shape)
assert_allclose(actual, expected)
def test_zero_inflated_enumerate_support():
base_dist = dist.Bernoulli(0.5)
d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)
assert d.has_enumerate_support
assert_allclose(d.enumerate_support(), base_dist.enumerate_support())
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)
@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])
@pytest.mark.parametrize('sample_shape', [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples
).shape == sample_shape + new_batch_shape
assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,
) + new_batch_shape
if prepend_shape:
with pytest.raises(ValueError, match=
'Cannot broadcast distribution of shape'):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
@pytest.mark.parametrize('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,
5)])
@pytest.mark.parametrize('event_dim', [0, 1, 2, 3])
@pytest.mark.parametrize('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])
def test_expand_shuffle_regression(base_shape, event_dim, sample_shape):
expand_shape = 2, 3, 5
event_dim = min(event_dim, len(base_shape))
loc = random.normal(random.PRNGKey(0), base_shape) * 10
base_dist = dist.Normal(loc, 0.1).to_event(event_dim)
expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -
event_dim])
samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)
expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.
shape())
assert_allclose(samples.mean(0), expected_mean, atol=0.1)
def test_sine_bivariate_von_mises_sample_mean():
loc = jnp.array([[2.0, -1.0], [-2, 1.0]])
sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)
samples = sine.sample(random.PRNGKey(0), (5000,))
assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)
@pytest.mark.parametrize('batch_shape', [(), (4,)])
def test_polya_gamma(batch_shape, num_points=20000):
d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)
rng_key = random.PRNGKey(0)
x = jnp.linspace(1e-06, d.truncation_point, num_points)
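    # Riemann-sum check that the truncated density integrates to ~1 on
    # (0, truncation_point].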
prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x
), axis=-1))
assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)
z = d.sample(rng_key, sample_shape=(3000,))
mean = jnp.mean(z, axis=-1)
assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)
@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)
), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
full_shape = 4, 1, 1, 1, 6
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
@pytest.mark.parametrize('batch_shape, mask_shape', [((), ()), ((2,), ()),
((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])
@pytest.mark.parametrize('event_shape', [(), (3,)])
def test_mask(batch_shape, event_shape, mask_shape):
jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len
(event_shape))
mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)
if mask_shape == ():
mask = bool(mask)
samples = jax_dist.sample(random.PRNGKey(1))
actual = jax_dist.mask(mask).log_prob(samples)
assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.
broadcast_shapes(batch_shape, mask_shape)))
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(
event_shape))))
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)
]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_dist_pytree(jax_dist, sp_dist, params):
def f(x):
return jax_dist(*params)
if jax_dist is _ImproperWrapper:
        pytest.skip('Cannot flatten ImproperUniform')
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama doesn't define flatten/unflatten")
jax.jit(f)(0)
lax.map(f, np.ones(3))
expected_dist = f(0)
actual_dist = jax.jit(f)(0)
expected_sample = expected_dist.sample(random.PRNGKey(0))
actual_sample = actual_dist.sample(random.PRNGKey(0))
expected_log_prob = expected_dist.log_prob(expected_sample)
actual_log_prob = actual_dist.log_prob(actual_sample)
assert_allclose(actual_sample, expected_sample, rtol=1e-06)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)
def test_expand_no_unnecessary_batch_shape_expansion():
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
def bs(arg):
return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
d = bs(arg)
dj = jax.jit(bs)(arg)
assert isinstance(d, dist.ExpandedDistribution)
assert isinstance(dj, dist.ExpandedDistribution)
assert d.batch_shape == dj.batch_shape
assert d.base_dist.batch_shape == dj.base_dist.batch_shape
assert d.base_dist.event_shape == dj.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(
size=shape)))
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(
shape)
q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(
shape)
actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.
Independent(q, len(event_shape)))
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.
Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.
Normal), (dist.Weibull, dist.Gamma)])
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f'Missing pattern for param {k}.')
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
def _allclose_or_equal(a1, a2):
if isinstance(a1, np.ndarray):
return np.allclose(a2, a1)
elif isinstance(a1, jnp.ndarray):
return jnp.allclose(a2, a1)
elif isinstance(a1, csr_matrix):
return np.allclose(a2.todense(), a1.todense())
else:
return a2 == a1 or a2 is a1
def _tree_equal(t1, t2):
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +
DIRECTIONAL)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[:len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
return
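    # Cases: vmap over all vmappable params at once, then over each param
    # individually, with scalar out_axes and with structured out_axes built
    # via vmap_over (batching along axis 0 and, for array params, axis 1).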
in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for
i in range(len(params))), 0), *(([(0 if i == idx else None) for i in
range(len(params))], 0) for idx in vmappable_param_idxs if params[
idx] is not None), *(([(0 if i == idx else None) for i in range(len
(params))], vmap_over(d, **{param_names[idx]: 0})) for idx in
vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==
idx else None) for i in range(len(params))], vmap_over(d, **{
param_names[idx]: 1})) for idx in vmappable_param_idxs if
isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).
ndim > 0 and jax_dist is not _GeneralMixture)]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),
arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,
in_axes)]
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes
)(*batched_params)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10
).sample(key)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-06)
def test_normal_log_cdf():
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)
@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], [-5.0]]),
    jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])])
def test_truncated_normal_log_prob_in_tail(value):
loc = 1.35
scale = jnp.geomspace(0.01, 1, 10)
low, high = -20, -1.0
a, b = (low - loc) / scale, (high - loc) / scale
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high
).log_prob(value)
jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from functools import partial
import inspect
from itertools import product
import math
import os
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import scipy
from scipy.sparse import csr_matrix
import scipy.stats as osp
import jax
from jax import grad, lax, vmap
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import expit, logsumexp
from jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm
import numpyro.distributions as dist
from numpyro.distributions import (
SineBivariateVonMises,
constraints,
kl_divergence,
transforms,
)
from numpyro.distributions.batch_util import vmap_over
from numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom
from numpyro.distributions.flows import InverseAutoregressiveTransform
from numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit
from numpyro.distributions.transforms import (
LowerCholeskyAffine,
PermuteTransform,
PowerTransform,
SimplexToOrderedTransform,
SoftplusTransform,
biject_to,
)
from numpyro.distributions.util import (
matrix_to_tril_vec,
multinomial,
signed_stick_breaking_tril,
sum_rightmost,
vec_to_tril_matrix,
)
from numpyro.nn import AutoregressiveNN
TEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.
def my_kron(A, B):
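    # Kronecker product over the last two axes, broadcasting over leading
    # batch dimensions.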
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])
return D.reshape(newshape)
def _identity(x):
return x
def _circ_mean(angles):
return jnp.arctan2(
jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)
)
def sde_fn1(x, _):
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
def sde_fn2(xy, _):
tau, a = 2.0, 1.1
x, y = xy[0], xy[1]
dx = tau * (x - x**3.0 / 3.0 + y)
dy = (1.0 / tau) * (a - x)
dxy = jnp.vstack([dx, dy]).reshape(xy.shape)
sigma2 = 0.1
return dxy, sigma2
class T(namedtuple("TestCase", ["jax_dist", "sp_dist", "params"])):
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
def _mvn_to_scipy(loc, cov, prec, tril):
jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _multivariate_t_to_scipy(df, loc, tril):
if scipy.__version__ < "1.6.0":
pytest.skip(
"Multivariate Student-T distribution is not available in scipy < 1.6"
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _truncnorm_to_scipy(loc, scale, low, high):
if low is None:
a = -np.inf
else:
a = (low - loc) / scale
if high is None:
b = np.inf
else:
b = (high - loc) / scale
return osp.truncnorm(a, b, loc=loc, scale=scale)
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
_TruncatedNormal.arg_constraints = {}
_TruncatedNormal.reparametrized_params = []
_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = (np.array([0.0]), np.array([1.0]))
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(
self: SineSkewedVonMisesBatched, skewness=None
):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(
mixing_distribution=mixing_distribution,
component_distribution=component_dist,
)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(
self.component_distribution, loc=loc, scale=scale
)
return vmap_over.dispatch(dist.MixtureSameFamily)(
self, _component_distribution=component_distribution
)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(
loc=loc, covariance_matrix=covariance_matrix
)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(
mixing_distribution=mixing_distribution,
component_distribution=component_dist,
)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(
self, _component_distribution=component_distribution
)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [
dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)
]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(
mixing_distribution=mixing_distribution,
component_distributions=component_dists,
)
@property
def locs(self):
# hotfix for vmapping tests, which cannot easily check non-array attributes
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [
vmap_over(d, loc=locs, scale=scales) for d in self.component_distributions
]
return vmap_over.dispatch(dist.MixtureGeneral)(
self, _component_distributions=component_distributions
)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [
dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)
for loc_, covariance_matrix in zip(locs, covariance_matrices)
]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(
mixing_distribution=mixing_distribution,
component_distributions=component_dists,
)
@property
def locs(self):
# hotfix for vmapping tests, which cannot easily check non-array attributes
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [
vmap_over(d, loc=locs) for d in self.component_distributions
]
return vmap_over.dispatch(dist.MixtureGeneral)(
self, _component_distributions=component_distributions
)
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {"rate": constraints.positive, "gate_logits": constraints.real}
pytree_data_fields = ("rate",)
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=validate_args)
@vmap_over.register
def _vmap_over_zero_inflated_poisson_logits(
self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None
):
dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(
self,
base_dist=vmap_over(self.base_dist, rate=rate),
gate_logits=gate_logits,
gate=gate_logits,
)
dist_axes.rate = rate
return dist_axes
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: "FoldedNormal", loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(
self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)
)
d.loc = loc
d.scale = scale
return d
class _SparseCAR(dist.CAR):
reparametrized_params = ["loc", "correlation", "conditional_precision"]
def __init__(
self,
loc,
correlation,
conditional_precision,
adj_matrix,
*,
is_sparse=True,
validate_args=None,
):
super().__init__(
loc,
correlation,
conditional_precision,
adj_matrix,
is_sparse=True,
validate_args=validate_args,
)
_DIST_MAP = {
dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(
asymmetry, loc=loc, scale=scale
),
dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),
dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
dist.Beta: lambda con1, con0: osp.beta(con1, con0),
dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),
dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),
dist.BinomialLogits: lambda logits, total_count: osp.binom(
n=total_count, p=_to_probs_bernoulli(logits)
),
dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
dist.Chi2: lambda df: osp.chi2(df),
dist.Dirichlet: lambda conc: osp.dirichlet(conc),
dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),
dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),
dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),
dist.GeometricLogits: lambda logits: osp.geom(
p=_to_probs_bernoulli(logits), loc=-1
),
dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),
dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),
dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),
dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),
dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),
dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),
dist.LogUniform: lambda a, b: osp.loguniform(a, b),
dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(
n=total_count, p=probs
),
dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(
n=total_count, p=_to_probs_multinom(logits)
),
dist.MultivariateNormal: _mvn_to_scipy,
dist.MultivariateStudentT: _multivariate_t_to_scipy,
dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,
dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),
dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),
dist.Poisson: lambda rate: osp.poisson(rate),
dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),
dist.Uniform: lambda a, b: osp.uniform(a, b - a),
dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),
dist.VonMises: lambda loc, conc: osp.vonmises(
loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)
),
dist.Weibull: lambda scale, conc: osp.weibull_min(
c=conc,
scale=scale,
),
_TruncatedNormal: _truncnorm_to_scipy,
}
def get_sp_dist(jax_dist):
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
CONTINUOUS = [
T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),
T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),
T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),
T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),
T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),
T(
dist.AsymmetricLaplaceQuantile,
np.array([[1.0], [2.0]]),
2.0,
np.array([0.2, 0.8]),
),
T(dist.Beta, 0.2, 1.1),
T(dist.Beta, 1.0, np.array([2.0, 2.0])),
T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),
T(dist.BetaProportion, 0.2, 10.0),
T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),
T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),
T(dist.Chi2, 2.0),
T(dist.Chi2, np.array([0.3, 1.3])),
T(dist.Cauchy, 0.0, 1.0),
T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),
T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.Dirichlet, np.array([1.7])),
T(dist.Dirichlet, np.array([0.2, 1.1])),
T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),
T(
dist.EulerMaruyama,
np.array([0.0, 0.1, 0.2]),
sde_fn1,
dist.Normal(0.1, 1.0),
),
T(
dist.EulerMaruyama,
np.array([0.0, 0.1, 0.2]),
sde_fn2,
dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
),
T(
dist.EulerMaruyama,
np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
sde_fn2,
dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
),
T(
dist.EulerMaruyama,
np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
sde_fn2,
dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),
),
T(dist.Exponential, 2.0),
T(dist.Exponential, np.array([4.0, 2.0])),
T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.GaussianRandomWalk, 0.1, 10),
T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),
T(
dist.GaussianCopulaBeta,
np.array([7.0, 2.0]),
np.array([4.0, 10.0]),
np.array([[1.0, 0.75], [0.75, 1.0]]),
),
T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),
T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),
T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.Gumbel, 0.0, 1.0),
T(dist.Gumbel, 0.5, 2.0),
T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),
T(FoldedNormal, 2.0, 4.0),
T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),
T(dist.HalfCauchy, 1.0),
T(dist.HalfCauchy, np.array([1.0, 2.0])),
T(dist.HalfNormal, 1.0),
T(dist.HalfNormal, np.array([1.0, 2.0])),
T(_ImproperWrapper, constraints.positive, (), (3,)),
T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),
T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Kumaraswamy, 0.6, 0.5),
T(dist.Laplace, 0.0, 1.0),
T(dist.Laplace, 0.5, np.array([1.0, 2.5])),
T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),
T(dist.LKJ, 2, 0.5, "onion"),
T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
T(dist.LKJCholesky, 2, 0.5, "onion"),
T(dist.LKJCholesky, 2, 0.5, "cvine"),
T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "onion"),
pytest.param(
*T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
pytest.param(
*T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "onion"),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "cvine"),
T(dist.Logistic, 0.0, 1.0),
T(dist.Logistic, 1.0, np.array([1.0, 2.0])),
T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.LogNormal, 1.0, 0.2),
T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),
T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),
T(dist.LogUniform, 1.0, 2.0),
T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),
T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),
T(
dist.MatrixNormal,
1.0 * np.arange(6).reshape(3, 2),
np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),
np.array([[1.0, 0], [0.4, 1]]),
),
T(
dist.MatrixNormal,
1.0 * np.arange(12).reshape((2, 3, 2)),
np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),
np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),
),
T(
dist.MatrixNormal,
1.0 * np.arange(36).reshape((2, 3, 3, 2)),
np.identity(3),
np.identity(2),
),
T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),
T(
dist.MultivariateNormal,
np.array([1.0, 3.0]),
None,
np.array([[1.0, 0.5], [0.5, 1.0]]),
None,
),
T(
dist.MultivariateNormal,
np.array([1.0, 3.0]),
None,
np.array([[[1.0, 0.5], [0.5, 1.0]]]),
None,
),
T(
dist.MultivariateNormal,
np.array([2.0]),
None,
None,
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateNormal,
np.arange(6, dtype=np.float32).reshape((3, 2)),
None,
None,
np.array([[1.0, 0.0], [0.0, 1.0]]),
),
T(
dist.MultivariateNormal,
0.0,
None,
np.broadcast_to(np.identity(3), (2, 3, 3)),
None,
),
T(
dist.CAR,
1.2,
np.array([-0.2, 0.3]),
0.1,
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
dist.CAR,
np.array([0.0, 1.0, 3.0, 4.0]),
0.1,
np.array([0.3, 0.7]),
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
_SparseCAR,
np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),
0.0,
0.1,
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
dist.MultivariateStudentT,
15.0,
0.0,
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([1.0, 3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([1.0, 3.0]),
np.array([[[1.0, 0.0], [0.5, 1.0]]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.arange(6, dtype=np.float32).reshape((3, 2)),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.ones(3),
np.broadcast_to(np.identity(3), (2, 3, 3)),
),
T(
dist.MultivariateStudentT,
np.array(7.0),
np.array([1.0, 3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
np.arange(20, 22, dtype=jnp.float32),
np.ones(3),
np.broadcast_to(jnp.identity(3), (2, 3, 3)),
),
T(
dist.MultivariateStudentT,
np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),
np.ones(2),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.LowRankMultivariateNormal,
np.zeros(2),
np.array([[1.0], [0.0]]),
np.array([1.0, 1.0]),
),
T(
dist.LowRankMultivariateNormal,
np.arange(6, dtype=jnp.float32).reshape((2, 3)),
np.arange(6, dtype=jnp.float32).reshape((3, 2)),
np.array([1.0, 2.0, 3.0]),
),
T(dist.Normal, 0.0, 1.0),
T(dist.Normal, 1.0, np.array([1.0, 2.0])),
T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.Pareto, 1.0, 2.0),
T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),
T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),
T(dist.RelaxedBernoulliLogits, 2.0, -10.0),
T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),
T(dist.SoftLaplace, 1.0, 1.0),
T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),
T(dist.StudentT, 1.0, 1.0, 0.5),
T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),
T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),
T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),
T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),
T(
_TruncatedCauchy,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
np.array([-2.0, 2.0]),
None,
),
T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),
T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),
T(_TruncatedNormal, 0.0, 1.0, -1.0, None),
T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),
T(
_TruncatedNormal,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
np.array([-2.0, 2.0]),
None,
),
T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),
T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),
T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),
T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),
T(
_TruncatedNormal,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
None,
np.array([-2.0, 2.0]),
),
T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),
T(dist.Uniform, 0.0, 2.0),
T(dist.Uniform, 1.0, np.array([2.0, 3.0])),
T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),
T(dist.Weibull, 0.2, 1.1),
T(dist.Weibull, 2.8, np.array([2.0, 2.0])),
T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),
T(
_GaussianMixture,
np.ones(3) / 3.0,
np.array([0.0, 7.7, 2.1]),
np.array([4.2, 7.7, 2.1]),
),
T(
_Gaussian2DMixture,
np.array([0.2, 0.5, 0.3]),
np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean
np.array(
[
[
[0.1, -0.2],
[-0.2, 1.0],
],
[
[0.75, 0.0],
[0.0, 0.75],
],
[
[1.0, 0.5],
[0.5, 0.27],
],
]
), # Covariance
),
T(
_GeneralMixture,
np.array([0.2, 0.3, 0.5]),
np.array([0.0, 7.7, 2.1]),
np.array([4.2, 1.7, 2.1]),
),
T(
_General2DMixture,
np.array([0.2, 0.5, 0.3]),
np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean
np.array(
[
[
[0.1, -0.2],
[-0.2, 1.0],
],
[
[0.75, 0.0],
[0.0, 0.75],
],
[
[1.0, 0.5],
[0.5, 0.27],
],
]
), # Covariance
),
]
DIRECTIONAL = [
T(dist.VonMises, 2.0, 10.0),
T(dist.VonMises, 2.0, np.array([150.0, 10.0])),
T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),
pytest.param(
*T(
dist.SineBivariateVonMises,
0.0,
0.0,
5.0,
6.0,
2.0,
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(
dist.SineBivariateVonMises,
3.003,
-1.343,
5.0,
6.0,
2.0,
),
pytest.param(
*T(
dist.SineBivariateVonMises,
-1.232,
-1.3430,
3.4,
2.0,
1.0,
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
pytest.param(
*T(
dist.SineBivariateVonMises,
np.array([math.pi - 0.2, 1.0]),
np.array([0.0, 1.0]),
np.array([5.0, 5.0]),
np.array([7.0, 0.5]),
None,
np.array([0.5, 0.1]),
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(dist.ProjectedNormal, np.array([0.0, 0.0])),
T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),
T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),
T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),
T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),
T(SineSkewedVonMises, np.array([0.342355])),
T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),
]
DISCRETE = [
T(dist.BetaBinomial, 2.0, 5.0, 10),
T(
dist.BetaBinomial,
np.array([2.0, 4.0]),
np.array([5.0, 3.0]),
np.array([10, 12]),
),
T(dist.BernoulliProbs, 0.2),
T(dist.BernoulliProbs, np.array([0.2, 0.7])),
T(dist.BernoulliLogits, np.array([-1.0, 3.0])),
T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),
T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),
T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),
T(dist.CategoricalProbs, np.array([1.0])),
T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),
T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),
T(dist.CategoricalLogits, np.array([-5.0])),
T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),
T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),
T(dist.Delta, 1),
T(dist.Delta, np.array([0.0, 2.0])),
T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),
T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),
T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),
T(dist.GammaPoisson, 2.0, 2.0),
T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),
T(dist.GeometricProbs, 0.2),
T(dist.GeometricProbs, np.array([0.2, 0.7])),
T(dist.GeometricLogits, np.array([-1.0, 3.0])),
T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),
T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),
T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),
T(dist.NegativeBinomialProbs, 10, 0.2),
T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),
T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),
T(
dist.NegativeBinomialProbs,
np.array([4.2, 10.7, 2.1]),
np.array([0.2, 0.6, 0.5]),
),
T(dist.NegativeBinomialLogits, 10, -2.1),
T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),
T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),
T(
dist.NegativeBinomialLogits,
np.array([4.2, 7.7, 2.1]),
np.array([4.2, 0.7, 2.1]),
),
T(dist.NegativeBinomial2, 0.3, 10),
T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),
T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),
T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),
T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),
T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),
T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),
T(dist.Poisson, 2.0),
T(dist.Poisson, np.array([2.0, 3.0, 5.0])),
T(SparsePoisson, 2.0),
T(SparsePoisson, np.array([2.0, 3.0, 5.0])),
T(SparsePoisson, 2),
T(dist.ZeroInflatedPoisson, 0.6, 2.0),
T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),
T(ZeroInflatedPoissonLogits, 2.0, 3.0),
T(
ZeroInflatedPoissonLogits,
np.array([0.2, 4.0, 0.3]),
np.array([2.0, -3.0, 5.0]),
),
]
def _is_batched_multivariate(jax_dist):
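    # a distribution is "batched multivariate" when both its event shape and its
    # batch shape are non-empty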
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
def gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):
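    # generate a random value of shape `size` whose entries satisfy `constraint`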
eps = 1e-6
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size)
elif isinstance(constraint, constraints.greater_than):
return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.randint(key, size, lower_bound, upper_bound + 1)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)
elif isinstance(constraint, constraints.interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)
elif constraint in (constraints.real, constraints.real_vector):
return random.normal(key, size)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(
key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
)
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
elif constraint is constraints.corr_matrix:
cholesky = signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return jnp.tril(random.uniform(key, size))
elif constraint is constraints.positive_definite:
x = random.normal(key, size)
return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x - random.normal(key, size[:-1] + (1,))
elif isinstance(constraint, constraints.independent):
return gen_values_within_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
        return x / jnp.linalg.norm(x, axis=-1, keepdims=True)
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [0, (-1) ** sign * 0.5]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError("{} not implemented.".format(constraint))
def gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):
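    # generate a random value of shape `size` whose entries violate `constraint`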
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size) - 2
elif isinstance(constraint, constraints.greater_than):
return constraint.lower_bound - jnp.exp(random.normal(key, size))
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
return random.randint(key, size, lower_bound - 1, lower_bound)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)
elif isinstance(constraint, constraints.interval):
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=upper_bound, maxval=upper_bound + 1.0)
elif constraint in [constraints.real, constraints.real_vector]:
return lax.full(size, np.nan)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return (
multinomial(
key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
)
+ 1
)
elif constraint is constraints.corr_cholesky:
return (
signed_stick_breaking_tril(
random.uniform(
key,
size[:-2] + (size[-1] * (size[-1] - 1) // 2,),
minval=-1,
maxval=1,
)
)
+ 1e-2
)
elif constraint is constraints.corr_matrix:
cholesky = 1e-2 + signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return random.uniform(key, size)
elif constraint is constraints.positive_definite:
return random.normal(key, size)
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x[..., ::-1]
elif isinstance(constraint, constraints.independent):
return gen_values_outside_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)
return 2 * x
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError("{} not implemented.".format(constraint))
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if (
sp_dist
and not _is_batched_multivariate(jax_dist)
and not isinstance(jax_dist, dist.MultivariateStudentT)
):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif (
sp_dist
and not _is_batched_multivariate(jax_dist)
and isinstance(jax_dist, dist.MultivariateStudentT)
):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
size = (1) if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip("scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(
jax_dist.precision_matrix,
jnp.linalg.inv(jax_dist.covariance_matrix),
rtol=1e-6,
)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, "shape", ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist).infer_shapes(
*shapes
)
except NotImplementedError:
pytest.skip(f"{type(jax_dist).__name__}.infer_shapes() is not implemented")
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
@pytest.mark.parametrize("batch_shape", [(), (4,), (3, 2)])
def test_unit(batch_shape):
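    # Unit is the trivial distribution over empty (size-0) samples; its log_prob
    # is simply the supplied log_factor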
log_factor = random.normal(random.PRNGKey(0), batch_shape)
d = dist.Unit(log_factor=log_factor)
x = d.sample(random.PRNGKey(1))
assert x.shape == batch_shape + (0,)
assert (d.log_prob(x) == log_factor).all()
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
    # these params are drawn through the gamma sampler, which has a pathwise gradient;
    # they are excluded from the reparameterized-gradient check below
gamma_derived_params = {
"Gamma": ["concentration"],
"Beta": ["concentration1", "concentration0"],
"BetaProportion": ["mean", "concentration"],
"Chi2": ["df"],
"Dirichlet": ["concentration"],
"InverseGamma": ["concentration"],
"LKJ": ["concentration"],
"LKJCholesky": ["concentration"],
"StudentT": ["df"],
}.get(jax_dist.__name__, [])
dist_args = [
p
for p in (
inspect.getfullargspec(jax_dist.__init__)[0][1:]
if inspect.isclass(jax_dist)
            # account for the case where jax_dist is a function
else inspect.getfullargspec(jax_dist)[0]
)
]
params_dict = dict(zip(dist_args[: len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [
p for p in jax_class.reparametrized_params if p not in gamma_derived_params
]
if not reparametrized_params:
pytest.skip("{} not reparametrized.".format(jax_class.__name__))
nonrepara_params_dict = {
k: v for k, v in params_dict.items() if k not in reparametrized_params
}
repara_params = tuple(
v for k, v in params_dict.items() if k in reparametrized_params
)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(
jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)
)
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 1e-3
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]
args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
# finite diff approximation
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)
@pytest.mark.parametrize(
"jax_dist, params",
[
(dist.Gamma, (1.0,)),
(dist.Gamma, (0.1,)),
(dist.Gamma, (10.0,)),
(dist.Chi2, (1.0,)),
(dist.Chi2, (0.1,)),
(dist.Chi2, (10.0,)),
(dist.Beta, (1.0, 1.0)),
(dist.StudentT, (5.0, 2.0, 4.0)),
],
)
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
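    # compare the pathwise (reparameterized) gradient of a Monte Carlo estimate of
    # E[z + z**2] with the gradient of its closed form, mean + variance + mean**2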
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z**2).mean(0)
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean**2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_jit_log_likelihood(jax_dist, sp_dist, params):
if jax_dist.__name__ in (
"EulerMaruyama",
"GaussianRandomWalk",
"_ImproperWrapper",
"LKJ",
"LKJCholesky",
"_SparseCAR",
):
pytest.xfail(reason="non-jittable params")
rng_key = random.PRNGKey(0)
samples = jax_dist(*params).sample(key=rng_key, sample_shape=(2, 3))
def log_likelihood(*params):
return jax_dist(*params).log_prob(samples)
expected = log_likelihood(*params)
actual = jax.jit(log_likelihood)(*params)
assert_allclose(actual, expected, atol=2e-5, rtol=2e-5)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
@pytest.mark.parametrize("jit", [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (
dist.LeftTruncatedDistribution,
dist.RightTruncatedDistribution,
dist.TwoSidedTruncatedDistribution,
)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
# new api
loc, scale, low, high = (
params[0].loc,
params[0].scale,
params[1],
params[2],
)
else:
# old api
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(
sp_dist.cdf(high) - sp_dist.cdf(low)
)
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
return
pytest.skip("no corresponding scipy distn.")
if _is_batched_multivariate(jax_dist):
pytest.skip("batching not allowed in multivariate distns.")
if jax_dist.event_shape and prepend_shape:
# >>> d = sp.dirichlet([1.1, 1.1])
# >>> samples = d.rvs(size=(2,))
# >>> d.logpdf(samples)
# ValueError: The input vector 'x' must lie within the normal simplex ...
pytest.skip("batched samples cannot be scored by multivariate distributions.")
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
# precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1
if "The input vector 'x' must lie within the normal simplex." in str(e):
samples = jax.device_get(samples).astype("float64")
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
def test_mixture_log_prob():
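    # with equal mixture weights and identical Normal(0, 1) components, the mixture
    # log_prob must match the single-component log_prob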
gmm = dist.MixtureSameFamily(
dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])
)
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params",
# TODO: add more complete pattern for Discrete.cdf
CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],
)
@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning")
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip("skip testing cdf/icdf methods of multivariate distributions")
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5
if d.shape() == () and not d.is_discrete:
assert_allclose(
jax.vmap(jax.grad(d.cdf))(samples),
jnp.exp(d.log_prob(samples)),
atol=1e-5,
rtol=rtol,
)
assert_allclose(
jax.vmap(jax.grad(d.icdf))(quantiles),
jnp.exp(-d.log_prob(d.icdf(quantiles))),
atol=1e-5,
rtol=rtol,
)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)
except NotImplementedError:
pass
# test against scipy
if not sp_dist:
pytest.skip("no corresponding scipy distn.")
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)
except NotImplementedError:
pass
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DIRECTIONAL)
def test_gof(jax_dist, sp_dist, params):
if "Improper" in jax_dist.__name__:
pytest.skip("distribution has improper .log_prob()")
if "LKJ" in jax_dist.__name__:
pytest.xfail("incorrect submanifold scaling")
if jax_dist is dist.EulerMaruyama:
d = jax_dist(*params)
if d.event_dim > 1:
pytest.skip("EulerMaruyama skip test when event shape is non-trivial.")
num_samples = 10000
if "BetaProportion" in jax_dist.__name__:
num_samples = 20000
rng_key = random.PRNGKey(0)
d = jax_dist(*params)
samples = d.sample(key=rng_key, sample_shape=(num_samples,))
probs = np.exp(d.log_prob(samples))
dim = None
if jax_dist is dist.ProjectedNormal:
dim = samples.shape[-1] - 1
# Test each batch independently.
probs = probs.reshape(num_samples, -1)
samples = samples.reshape(probs.shape + d.event_shape)
if "Dirichlet" in jax_dist.__name__:
# The Dirichlet density is over all but one of the probs.
samples = samples[..., :-1]
for b in range(probs.shape[1]):
try:
gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
except InvalidTest:
pytest.skip("expensive test")
else:
assert gof > TEST_FAILURE_RATE
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
def test_independent_shape(jax_dist, sp_dist, params):
d = jax_dist(*params)
batch_shape, event_shape = d.batch_shape, d.event_shape
shape = batch_shape + event_shape
for i in range(len(batch_shape)):
indep = dist.Independent(d, reinterpreted_batch_ndims=i)
sample = indep.sample(random.PRNGKey(0))
event_boundary = len(shape) - len(event_shape) - i
assert indep.batch_shape == shape[:event_boundary]
assert indep.event_shape == shape[event_boundary:]
assert jnp.shape(indep.log_prob(sample)) == shape[:event_boundary]
def _tril_cholesky_to_tril_corr(x):
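    # map the strict lower triangle of a correlation Cholesky factor to the strict
    # lower triangle of the corresponding correlation matrix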
w = vec_to_tril_matrix(x, diagonal=-1)
diag = jnp.sqrt(1 - jnp.sum(w**2, axis=-1))
cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])
corr = jnp.matmul(cholesky, cholesky.T)
return matrix_to_tril_vec(corr, diagonal=-1)
@pytest.mark.parametrize("dimension", [2, 3, 5])
def test_log_prob_LKJCholesky_uniform(dimension):
# When concentration=1, the distribution of correlation matrices is uniform.
# We will test that fact here.
d = dist.LKJCholesky(dimension=dimension, concentration=1)
N = 5
corr_log_prob = []
for i in range(N):
sample = d.sample(random.PRNGKey(i))
log_prob = d.log_prob(sample)
sample_tril = matrix_to_tril_vec(sample, diagonal=-1)
cholesky_to_corr_jac = np.linalg.slogdet(
jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)
)[1]
corr_log_prob.append(log_prob - cholesky_to_corr_jac)
corr_log_prob = np.array(corr_log_prob)
# test if they are constant
assert_allclose(
corr_log_prob,
jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),
rtol=1e-6,
)
if dimension == 2:
        # when concentration = 1, LKJ gives a uniform distribution over correlation matrices,
        # hence for dimension = 2 the off-diagonal correlation is Uniform(-1, 1),
        # whose density is 0.5.
        # In addition, the jacobian of the transformation from cholesky -> corr is 1 (hence its
        # log value is 0) because the off-diagonal lower triangular element does not change
        # in the transform.
        # So target_log_prob = log(0.5)
assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)
@pytest.mark.parametrize("dimension", [2, 3, 5])
@pytest.mark.parametrize("concentration", [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
    # We will test against the fact that LKJCorrCholesky can be seen as a
    # TransformedDistribution whose base distribution is a distribution of partial
    # correlations in the C-vine method (modulo an affine transform to change the domain
    # from (0, 1) to (-1, 1)) and whose transform is a signed stick-breaking process.
d = dist.LKJCholesky(dimension, concentration, sample_method="cvine")
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
sample = signed_stick_breaking_tril(partial_correlation)
# compute signed stick breaking logdet
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2 # noqa: E731
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky).log_abs_det_jacobian(
unconstrained, sample
)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)
def test_zero_inflated_logits_probs_agree():
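    # parameterizing the gate by logits or by the equivalent probability must give
    # identical log_prob values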
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(
0,
20,
(
1000,
100,
),
)
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_ZIP_log_prob(rate):
# if gate is 0 ZIP is Poisson
zip_ = dist.ZeroInflatedPoisson(0.0, rate)
pois = dist.Poisson(rate)
s = zip_.sample(random.PRNGKey(0), (20,))
zip_prob = zip_.log_prob(s)
pois_prob = pois.log_prob(s)
assert_allclose(zip_prob, pois_prob, rtol=1e-6)
# if gate is 1 ZIP is Delta(0)
zip_ = dist.ZeroInflatedPoisson(1.0, rate)
delta = dist.Delta(0.0)
s = np.array([0.0, 1.0])
zip_prob = zip_.log_prob(s)
delta_prob = delta.log_prob(s)
assert_allclose(zip_prob, delta_prob, rtol=1e-6)
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
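    # Monte Carlo check: averaging Binomial(total_count, p) likelihoods over
    # p ~ Beta(concentration1, concentration0) should match BetaBinomial's log_prob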
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
probs = np.random.beta(concentration1, concentration0, size=(num_samples,) + shape)
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count).log_prob(
value
)
assert_allclose(actual, expected, rtol=0.02)
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("batch_shape", [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
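    # Monte Carlo check: averaging Multinomial likelihoods over
    # probs ~ Dirichlet(concentration) should match DirichletMultinomial's log_prob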
event_shape = (3,)
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
# test on one-hots
value = total_count * jnp.eye(event_shape[-1]).reshape(
event_shape + (1,) * len(batch_shape) + event_shape
)
num_samples = 100000
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
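    # Monte Carlo check: averaging Poisson likelihoods over rates drawn from
    # Gamma(gamma_conc, rate=gamma_rate) should match GammaPoisson's log_prob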
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
poisson_rate = np.random.gamma(
gamma_conc, 1 / gamma_rate, size=(num_samples,) + shape
)
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
pytest.skip("we have separated tests for LKJCholesky distribution")
if jax_dist is _ImproperWrapper:
pytest.skip("no param for ImproperUniform to test for log_prob gradient")
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 1e-3
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
# skip taking grad w.r.t. sde_fn
continue
if jax_dist is _SparseCAR and i == 3:
# skip taking grad w.r.t. adj_matrix
continue
if isinstance(
params[i], dist.Distribution
): # skip taking grad w.r.t. base_dist
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]
args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
# finite diff approximation
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
# grad w.r.t. `value` of Delta distribution will be 0
# but numerical value will give nan (= inf - inf)
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_mean_var(jax_dist, sp_dist, params):
if jax_dist is _ImproperWrapper:
pytest.skip("Improper distribution does not has mean/var implemented")
if jax_dist is FoldedNormal:
pytest.skip("Folded distribution does not has mean/var implemented")
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama distribution does not has mean/var implemented")
if jax_dist is dist.RelaxedBernoulliLogits:
pytest.skip("RelaxedBernoulli distribution does not has mean/var implemented")
if "SineSkewed" in jax_dist.__name__:
pytest.skip("Skewed Distribution are not symmetric about location.")
if jax_dist in (
_TruncatedNormal,
_TruncatedCauchy,
dist.LeftTruncatedDistribution,
dist.RightTruncatedDistribution,
dist.TwoSidedTruncatedDistribution,
):
pytest.skip("Truncated distributions do not has mean/var implemented")
if jax_dist is dist.ProjectedNormal:
pytest.skip("Mean is defined in submanifold")
n = (
20000
if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]
else 200000
)
d_jax = jax_dist(*params)
k = random.PRNGKey(0)
samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)
# check with suitable scipy implementation if available
# XXX: VonMises is already tested below
if (
sp_dist
and not _is_batched_multivariate(d_jax)
and jax_dist
not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]
):
d_sp = sp_dist(*params)
try:
sp_mean = d_sp.mean()
except TypeError: # mvn does not have .mean() method
sp_mean = d_sp.mean
# for multivariate distns try .cov first
if d_jax.event_shape:
try:
sp_var = jnp.diag(d_sp.cov())
except TypeError: # mvn does not have .cov() method
sp_var = jnp.diag(d_sp.cov)
except AttributeError:
sp_var = d_sp.var()
else:
sp_var = d_sp.var()
assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)
assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)
if jnp.all(jnp.isfinite(sp_mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
if jnp.all(jnp.isfinite(sp_var)):
assert_allclose(
jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
)
elif jax_dist in [dist.LKJ, dist.LKJCholesky]:
if jax_dist is dist.LKJCholesky:
corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))
else:
corr_samples = samples
dimension, concentration, _ = params
# marginal of off-diagonal entries
marginal = dist.Beta(
concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)
)
# scale statistics due to linear mapping
marginal_mean = 2 * marginal.mean - 1
marginal_std = 2 * jnp.sqrt(marginal.variance)
expected_mean = jnp.broadcast_to(
jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),
jnp.shape(marginal_mean) + d_jax.event_shape,
)
expected_std = jnp.broadcast_to(
jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),
jnp.shape(marginal_std) + d_jax.event_shape,
)
# diagonal elements of correlation matrices are 1
expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(
dimension
)
expected_std = expected_std * (1 - jnp.identity(dimension))
assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)
assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)
elif jax_dist in [dist.VonMises]:
# circular mean = sample mean
assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)
# circular variance
x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)
expected_variance = 1 - jnp.sqrt(x**2 + y**2)
assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)
elif jax_dist in [dist.SineBivariateVonMises]:
phi_loc = _circ_mean(samples[..., 0])
psi_loc = _circ_mean(samples[..., 1])
assert_allclose(
d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2
)
elif jax_dist in [dist.MatrixNormal]:
sample_shape = (200_000,)
# use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))
if len(d_jax.batch_shape) > 0:
axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]
axes = tuple(axes)
samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))
subshape = samples_re.shape[: len(axes)]
ixi = product(*[range(k) for k in subshape])
for ix in ixi:
# mean
def get_min_shape(ix, batch_shape):
return min(ix, tuple(map(lambda x: x - 1, batch_shape)))
ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])
                assert jnp.allclose(
jnp.mean(samples_re[ix], 0),
jnp.squeeze(d_jax.mean[ix_loc]),
rtol=0.5,
atol=1e-2,
)
# cov
samples_mvn = jnp.squeeze(samples_re[ix]).reshape(
sample_shape + (-1,), order="F"
)
ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])
ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])
scale_tril = my_kron(
d_jax.scale_tril_column[ix_col],
d_jax.scale_tril_row[ix_row],
)
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
                assert jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
else: # unbatched
# mean
            assert jnp.allclose(
jnp.mean(samples, 0),
jnp.squeeze(d_jax.mean),
rtol=0.5,
atol=1e-2,
)
# cov
samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,), order="F")
scale_tril = my_kron(
jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)
)
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
            assert jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
else:
if jnp.all(jnp.isfinite(d_jax.mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
if isinstance(d_jax, dist.CAR):
pytest.skip("CAR distribution does not have `variance` implemented.")
if isinstance(d_jax, dist.Gompertz):
pytest.skip("Gompertz distribution does not have `variance` implemented.")
if jnp.all(jnp.isfinite(d_jax.variance)):
assert_allclose(
jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (
_TruncatedNormal,
_TruncatedCauchy,
_GaussianMixture,
_Gaussian2DMixture,
_GeneralMixture,
_General2DMixture,
):
pytest.skip(f"{jax_dist.__name__} is a function, not a class")
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if (
jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)
and dist_args[i] != "concentration"
):
continue
if "SineSkewed" in jax_dist.__name__ and dist_args[i] != "skewness":
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != "t":
continue
if (
jax_dist is dist.TwoSidedTruncatedDistribution
and dist_args[i] == "base_dist"
):
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == "num_steps":
continue
if (
jax_dist is dist.SineBivariateVonMises
and dist_args[i] == "weighted_correlation"
):
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(
constraint, jnp.shape(params[i]), key_gen
)
valid_params[i] = gen_values_within_bounds(
constraint, jnp.shape(params[i]), key_gen
)
if jax_dist is dist.MultivariateStudentT:
        # As the mean is only defined for df > 1 and we instantiate
        # scipy.stats.multivariate_t with the same mean as jax_dist,
        # we need to ensure this is defined, so force df > 1
valid_params[0] += 1
if jax_dist is dist.LogUniform:
        # scipy.stats.loguniform takes parameters a and b
        # with a > 0 and b > a.
        # gen_values_within_bounds() only generates
        # a > 0 and b > 0, so make b = a + b.
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
# Invalid parameter values throw ValueError
if not dependent_constraint and (
jax_dist is not _ImproperWrapper and "SineSkewed" not in jax_dist.__name__
):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
# test error raised under jit omnistaging
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
# Test agreement of log density evaluation on randomly generated samples
# with scipy's implementation when available.
if (
sp_dist
and not _is_batched_multivariate(d)
and not (d.event_shape and prepend_shape)
):
valid_samples = gen_values_within_bounds(
d.support, size=prepend_shape + d.batch_shape + d.event_shape
)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)
    # Out of support samples raise an "Out-of-support" UserWarning
oob_samples = gen_values_outside_bounds(
d.support, size=prepend_shape + d.batch_shape + d.event_shape
)
with pytest.warns(UserWarning, match="Out-of-support"):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match="Out-of-support"):
        # test that the warning also fires under jit omnistaging
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match="got invalid"):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match="Out-of-support"):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
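    # the probs and logits parameterizations of Categorical should agree in both the
    # log_prob value and its gradient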
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return (
dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(data).sum()
)
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-6)
assert_allclose(grad_fx, grad_gx, atol=1e-4)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(
ValueError, match=r"^BetaProportion distribution got invalid mean parameter\.$"
):
dist.BetaProportion(1.0, 1.0)
########################################
# Tests for constraints and transforms #
########################################
@pytest.mark.parametrize(
"constraint, x, expected",
[
(constraints.boolean, np.array([True, False]), np.array([True, True])),
(constraints.boolean, np.array([1, 1]), np.array([True, True])),
(constraints.boolean, np.array([-1, 1]), np.array([False, True])),
(
constraints.corr_cholesky,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
np.array([True, False]),
), # NB: not lower_triangular
(
constraints.corr_cholesky,
np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
np.array([False, False]),
), # NB: not positive_diagonal & not unit_norm_row
(
constraints.corr_matrix,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
np.array([True, False]),
), # NB: not lower_triangular
(
constraints.corr_matrix,
np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
np.array([False, False]),
), # NB: not unit diagonal
(constraints.greater_than(1), 3, True),
(
constraints.greater_than(1),
np.array([-1, 1, 5]),
np.array([False, False, True]),
),
(constraints.integer_interval(-3, 5), 0, True),
(
constraints.integer_interval(-3, 5),
np.array([-5, -3, 0, 1.1, 5, 7]),
np.array([False, True, True, False, True, False]),
),
(constraints.interval(-3, 5), 0, True),
(
constraints.interval(-3, 5),
np.array([-5, -3, 0, 5, 7]),
np.array([False, True, True, True, False]),
),
(constraints.less_than(1), -2, True),
(
constraints.less_than(1),
np.array([-1, 1, 5]),
np.array([True, False, False]),
),
(constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),
(
constraints.lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
np.array([False, False]),
),
(constraints.nonnegative_integer, 3, True),
(
constraints.nonnegative_integer,
np.array([-1.0, 0.0, 5.0]),
np.array([False, True, True]),
),
(constraints.positive, 3, True),
(constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),
(constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),
(
constraints.positive_definite,
np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),
np.array([False, False]),
),
(constraints.positive_integer, 3, True),
(
constraints.positive_integer,
np.array([-1.0, 0.0, 5.0]),
np.array([False, False, True]),
),
(constraints.real, -1, True),
(
constraints.real,
np.array([np.inf, -np.inf, np.nan, np.pi]),
np.array([False, False, False, True]),
),
(constraints.simplex, np.array([0.1, 0.3, 0.6]), True),
(
constraints.simplex,
np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),
np.array([True, False, False]),
),
(constraints.softplus_positive, 3, True),
(
constraints.softplus_positive,
np.array([-1, 0, 5]),
np.array([False, False, True]),
),
(
constraints.softplus_lower_cholesky,
np.array([[1.0, 0.0], [-2.0, 0.1]]),
True,
),
(
constraints.softplus_lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
np.array([False, False]),
),
(constraints.unit_interval, 0.1, True),
(
constraints.unit_interval,
np.array([-5, 0, 0.5, 1, 7]),
np.array([False, True, True, True, False]),
),
(
constraints.sphere,
np.array([[1, 0, 0], [0.5, 0.5, 0]]),
np.array([True, False]),
),
(
constraints.open_interval(0.0, 1.0),
np.array([-5, 0, 0.5, 1, 7]),
np.array([False, False, True, False, False]),
),
],
)
def test_constraints(constraint, x, expected):
v = constraint.feasible_like(x)
if jnp.result_type(v) == "float32" or jnp.result_type(v) == "float64":
assert not constraint.is_discrete
assert_array_equal(constraint(x), expected)
feasible_value = constraint.feasible_like(x)
assert jnp.shape(feasible_value) == jnp.shape(x)
assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))
try:
inverse = biject_to(constraint).inv(feasible_value)
except NotImplementedError:
pass
else:
assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)
@pytest.mark.parametrize(
"constraint",
[
constraints.corr_cholesky,
constraints.corr_matrix,
constraints.greater_than(2),
constraints.interval(-3, 5),
constraints.l1_ball,
constraints.less_than(1),
constraints.lower_cholesky,
constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector,
constraints.positive,
constraints.positive_definite,
constraints.positive_ordered_vector,
constraints.real,
constraints.real_vector,
constraints.simplex,
constraints.softplus_positive,
constraints.softplus_lower_cholesky,
constraints.unit_interval,
constraints.open_interval(0.0, 1.0),
],
ids=lambda x: x.__class__,
)
@pytest.mark.parametrize("shape", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
    # test that inv works for NaN arrays:
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
# test codomain
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))
# test inv
z = transform.inv(y)
assert_allclose(x, z, atol=1e-5, rtol=1e-5)
    # test domain; currently all domains are constraints.real or constraints.real_vector
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
# test log_abs_det_jacobian
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
if len(shape) == event_dim:
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]
elif constraint in [
constraints.real_vector,
constraints.ordered_vector,
constraints.positive_ordered_vector,
constraints.l1_ball,
]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:
vec_transform = lambda x: matrix_to_tril_vec( # noqa: E731
transform(x), diagonal=-1
)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
# fill the upper triangular part
matrix = (
matrix
+ jnp.swapaxes(matrix, -2, -1)
+ jnp.identity(matrix.shape[-1])
)
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
elif constraint in [
constraints.lower_cholesky,
constraints.scaled_unit_lower_cholesky,
constraints.positive_definite,
constraints.softplus_lower_cholesky,
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x)) # noqa: E731
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
# fill the upper triangular part
matrix = (
matrix
+ jnp.swapaxes(matrix, -2, -1)
- jnp.diag(jnp.diag(matrix))
)
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)
assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)
# NB: skip transforms which are tested in `test_biject_to`
@pytest.mark.parametrize(
"transform, event_shape",
[
(PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),
(PowerTransform(2.0), ()),
(SoftplusTransform(), ()),
(
LowerCholeskyAffine(
np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])
),
(2,),
),
(
transforms.ComposeTransform(
[
biject_to(constraints.simplex),
SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv,
]
),
(5,),
),
],
)
@pytest.mark.parametrize(
"batch_shape",
[
(),
(1,),
(3,),
(6,),
(3, 1),
(1, 3),
(5, 3),
],
)
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
# test codomain
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
# test inv
z = transform.inv(y)
assert_allclose(x, z, atol=1e-6, rtol=1e-4)
assert transform.inv.inv is transform
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
# test domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
# test log_abs_det_jacobian
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-6)
assert_allclose(actual, -inv_expected, atol=1e-6)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t1])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 2
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
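    # expected log|det|: the first affine scales 6 inputs by 2, then the LowerCholesky
    # jacobian at x * 2, then the second affine scales the 3 x 3 = 9 output entries by 2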
expected_log_det = (
jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9
)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = (
jnp.log(2) * 6
+ t2.log_abs_det_jacobian(x * 2, z)
+ t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
@pytest.mark.parametrize("prepend_event_shape", [(), (4,)])
@pytest.mark.parametrize("sample_shape", [(), (7,)])
def test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):
base_dist = (
dist.Normal(0, 1)
.expand(batch_shape + prepend_event_shape + (6,))
.to_event(1 + len(prepend_event_shape))
)
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
d = dist.TransformedDistribution(base_dist, [t1, t2, t1])
assert d.event_dim == 2 + len(prepend_event_shape)
y = d.sample(random.PRNGKey(0), sample_shape)
t = transforms.ComposeTransform([t1, t2, t1])
x = t.inv(y)
assert x.shape == sample_shape + base_dist.shape()
log_prob = d.log_prob(y)
assert log_prob.shape == sample_shape + batch_shape
t_log_det = t.log_abs_det_jacobian(x, y)
if prepend_event_shape:
t_log_det = t_log_det.sum(-1)
expected_log_prob = base_dist.log_prob(x) - t_log_det
assert_allclose(log_prob, expected_log_prob, atol=1e-5)
@pytest.mark.parametrize(
"transformed_dist",
[
dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()
),
dist.TransformedDistribution(
dist.Exponential(jnp.ones(2)),
[
transforms.PowerTransform(0.7),
transforms.AffineTransform(0.0, jnp.ones(2) * 3),
],
),
],
)
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(
random.PRNGKey(1)
)
assert_allclose(
transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample),
)
def test_transformed_transformed_distribution():
loc, scale = -2, 3
dist1 = dist.TransformedDistribution(
dist.Normal(2, 3), transforms.PowerTransform(2.0)
)
dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))
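    # wrapping a TransformedDistribution flattens the nested transforms into a
    # single list applied to the original base distribution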
assert isinstance(dist2.base_dist, dist.Normal)
assert len(dist2.transforms) == 2
assert isinstance(dist2.transforms[0], transforms.PowerTransform)
assert isinstance(dist2.transforms[1], transforms.AffineTransform)
rng_key = random.PRNGKey(0)
assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))
intermediates = dist2.sample_with_intermediates(rng_key)
assert len(intermediates) == 2
def _make_iaf(input_dim, hidden_dims, rng_key):
arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])
_, init_params = arn_init(rng_key, (input_dim,))
return InverseAutoregressiveTransform(partial(arn, init_params))
@pytest.mark.parametrize(
"ts",
[
[transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],
[transforms.ExpTransform()],
[
transforms.ComposeTransform(
[transforms.AffineTransform(-2, 3), transforms.ExpTransform()]
),
transforms.PowerTransform(3.0),
],
[
_make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(0)),
transforms.PermuteTransform(jnp.arange(5)[::-1]),
_make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(1)),
],
],
)
def test_compose_transform_with_intermediates(ts):
transform = transforms.ComposeTransform(ts)
x = random.normal(random.PRNGKey(2), (7, 5))
y, intermediates = transform.call_with_intermediates(x)
logdet = transform.log_abs_det_jacobian(x, y, intermediates)
assert_allclose(y, transform(x))
assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))
@pytest.mark.parametrize("x_dim, y_dim", [(3, 3), (3, 4)])
def test_unpack_transform(x_dim, y_dim):
xy = np.random.randn(x_dim + y_dim)
unpack_fn = lambda xy: {"x": xy[:x_dim], "y": xy[x_dim:]} # noqa: E731
transform = transforms.UnpackTransform(unpack_fn)
z = transform(xy)
if x_dim == y_dim:
with pytest.warns(UserWarning, match="UnpackTransform.inv"):
t = transform.inv(z)
else:
t = transform.inv(z)
assert_allclose(t, xy)
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_generated_sample_distribution(
jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)
):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
"{} sampling method taken from upstream, no need to"
"test generated samples.".format(jax_dist.__name__)
)
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
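        # Kolmogorov-Smirnov test of our samples against the SciPy CDF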
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
@pytest.mark.parametrize(
"jax_dist, params, support",
[
(dist.BernoulliLogits, (5.0,), jnp.arange(2)),
(dist.BernoulliProbs, (0.5,), jnp.arange(2)),
(dist.BinomialLogits, (4.5, 10), jnp.arange(11)),
(dist.BinomialProbs, (0.5, 11), jnp.arange(12)),
(dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),
(dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),
(dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),
],
)
@pytest.mark.parametrize("batch_shape", [(5,), ()])
@pytest.mark.parametrize("expand", [False, True])
def test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):
p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))
actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)
expected = support.reshape((-1,) + (1,) * len(batch_shape))
if expand:
expected = jnp.broadcast_to(expected, support.shape + batch_shape)
assert_allclose(actual, expected)
def test_zero_inflated_enumerate_support():
base_dist = dist.Bernoulli(0.5)
d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)
assert d.has_enumerate_support
assert_allclose(d.enumerate_support(), base_dist.enumerate_support())
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
@pytest.mark.parametrize("prepend_shape", [(), (2, 3)])
@pytest.mark.parametrize("sample_shape", [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape
# test expand of expand
assert (
expanded_dist.expand((3,) + new_batch_shape).batch_shape
== (3,) + new_batch_shape
)
# test expand error
if prepend_shape:
with pytest.raises(ValueError, match="Cannot broadcast distribution of shape"):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
@pytest.mark.parametrize("base_shape", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])
@pytest.mark.parametrize("event_dim", [0, 1, 2, 3])
@pytest.mark.parametrize("sample_shape", [(1000,), (1000, 7, 1), (1000, 1, 7)])
def test_expand_shuffle_regression(base_shape, event_dim, sample_shape):
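    # regression test: expanding and then sampling must not shuffle values across
    # the batch; the per-position sample mean should match the broadcasted loc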
expand_shape = (2, 3, 5)
event_dim = min(event_dim, len(base_shape))
loc = random.normal(random.PRNGKey(0), base_shape) * 10
base_dist = dist.Normal(loc, 0.1).to_event(event_dim)
expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])
samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)
expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())
assert_allclose(samples.mean(0), expected_mean, atol=0.1)
@pytest.mark.parametrize("batch_shape", [(), (4,), (10, 3)])
def test_sine_bivariate_von_mises_batch_shape(batch_shape):
phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)
psi_loc = jnp.array(0.0)
phi_conc = jnp.array(1.0)
psi_conc = jnp.array(1.0)
corr = jnp.array(0.1)
sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)
assert sine.batch_shape == batch_shape
samples = sine.sample(random.PRNGKey(0))
assert samples.shape == (*batch_shape, 2)
def test_sine_bivariate_von_mises_sample_mean():
loc = jnp.array([[2.0, -1.0], [-2, 1.0]])
sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)
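    # with such large concentrations the samples cluster tightly around loc,
    # so the circular mean of the samples should recover loc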
samples = sine.sample(random.PRNGKey(0), (5000,))
assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)
@pytest.mark.parametrize("batch_shape", [(), (4,)])
def test_polya_gamma(batch_shape, num_points=20000):
d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)
rng_key = random.PRNGKey(0)
    # test that the density is approximately normalized: a Riemann sum of
    # exp(log_prob) over a grid up to the truncation point should be ~1
x = jnp.linspace(1.0e-6, d.truncation_point, num_points)
prob = (d.truncation_point / num_points) * jnp.exp(
logsumexp(d.log_prob(x), axis=-1)
)
assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)
# test mean of approximate sampler
z = d.sample(rng_key, sample_shape=(3000,))
mean = jnp.mean(z, axis=-1)
assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)
@pytest.mark.parametrize(
"extra_event_dims,expand_shape",
[(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],
)
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
full_shape = (4, 1, 1, 1, 6)
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
    # Throws error when batch shape cannot be broadcast
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
# Throws error when trying to shrink existing batch shape
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
@pytest.mark.parametrize(
"batch_shape, mask_shape",
[((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],
)
@pytest.mark.parametrize("event_shape", [(), (3,)])
def test_mask(batch_shape, event_shape, mask_shape):
jax_dist = (
dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))
)
mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)
if mask_shape == ():
mask = bool(mask)
samples = jax_dist.sample(random.PRNGKey(1))
actual = jax_dist.mask(mask).log_prob(samples)
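    # masked-out entries contribute log_prob == 0, so the nonzero entries must
    # line up with the broadcasted mask (a Normal log-density is almost surely nonzero)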
assert_allclose(
actual != 0,
jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),
)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(
jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))
)
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_dist_pytree(jax_dist, sp_dist, params):
def f(x):
return jax_dist(*params)
if jax_dist is _ImproperWrapper:
pytest.skip("Cannot flattening ImproperUniform")
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama doesn't define flatten/unflatten")
jax.jit(f)(0) # this test for flatten/unflatten
lax.map(f, np.ones(3)) # this test for compatibility w.r.t. scan
# Test that parameters do not change after flattening.
expected_dist = f(0)
actual_dist = jax.jit(f)(0)
expected_sample = expected_dist.sample(random.PRNGKey(0))
actual_sample = actual_dist.sample(random.PRNGKey(0))
expected_log_prob = expected_dist.log_prob(expected_sample)
actual_log_prob = actual_dist.log_prob(actual_sample)
assert_allclose(actual_sample, expected_sample, rtol=1e-6)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)
@pytest.mark.parametrize(
"method, arg", [("to_event", 1), ("mask", False), ("expand", [5])]
)
def test_special_dist_pytree(method, arg):
def f(x):
d = dist.Normal(np.zeros(1), np.ones(1))
return getattr(d, method)(arg)
jax.jit(f)(0)
lax.map(f, np.ones(3))
def test_expand_no_unnecessary_batch_shape_expansion():
# ExpandedDistribution can mutate the `batch_shape` of
# its base distribution in order to make ExpandedDistribution
# mappable, see #684. However, this mutation should not take
# place if no mapping operation is performed.
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
        # Low-level test: ensure that (tree_unflatten o tree_flatten)(expanded_dist)
        # amounts to an identity operation.
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
# High-level test: `jax.jit`ting a function returning an ExpandedDistribution
# (which involves an instance of the low-level case as it will transform
# the original function by adding some flattening and unflattening steps)
# should return same object as its non-jitted equivalent.
def bs(arg):
return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
d = bs(arg)
dj = jax.jit(bs)(arg)
assert isinstance(d, dist.ExpandedDistribution)
assert isinstance(dj, dist.ExpandedDistribution)
assert d.batch_shape == dj.batch_shape
assert d.base_dist.batch_shape == dj.base_dist.batch_shape
assert d.base_dist.event_shape == dj.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
actual = kl_divergence(
dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
)
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
actual = kl_divergence(
dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
)
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize("shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize(
"p_dist, q_dist",
[
(dist.Beta, dist.Beta),
(dist.Gamma, dist.Gamma),
(dist.Kumaraswamy, dist.Beta),
(dist.Normal, dist.Normal),
(dist.Weibull, dist.Gamma),
],
)
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f"Missing pattern for param {k}.")
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
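    # compare the analytic KL with a Monte Carlo estimate,
    # KL(p || q) = E_p[log p(x) - log q(x)]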
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10_000,)).copy()
expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
# test that vmapped binomial with p = 0 does not have an infinite loop
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
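# Return the positional indices of a distribution's constructor parameters that are
# also exposed by its registered `vmap_over` rule and can therefore be vmapped over.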
def _get_vmappable_dist_init_params(jax_dist):
    if jax_dist.__name__ == "_TruncatedCauchy":
        return [2, 3]
    elif jax_dist.__name__ == "_TruncatedNormal":
        return [2, 3]
elif issubclass(jax_dist, dist.Distribution):
init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[
1:
]
vmap_over_parameters = list(
inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()
)[1:]
return list(
[
i
for i, name in enumerate(init_parameters)
if name in vmap_over_parameters
]
)
else:
raise ValueError
def _allclose_or_equal(a1, a2):
if isinstance(a1, np.ndarray):
return np.allclose(a2, a1)
elif isinstance(a1, jnp.ndarray):
return jnp.allclose(a2, a1)
elif isinstance(a1, csr_matrix):
return np.allclose(a2.todense(), a1.todense())
else:
return a2 == a1 or a2 is a1
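# Compare two pytrees leaf-by-leaf with `_allclose_or_equal` and reduce to a single boolean.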
def _tree_equal(t1, t2):
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[: len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
# In this case, since csr arrays are not jittable,
# _SparseCAR has a csr_matrix as part of its pytree
# definition (not as a pytree leaf). This causes pytree
# operations like tree_map to fail, since these functions
# compare the pytree def of each of the arguments using ==
# which is ambiguous for array-like objects.
return
in_out_axes_cases = [
# vmap over all args
(
tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),
0,
),
# vmap over a single arg, out over all attributes of a distribution
*(
([0 if i == idx else None for i in range(len(params))], 0)
for idx in vmappable_param_idxs
if params[idx] is not None
),
# vmap over a single arg, out over the associated attribute of the distribution
*(
(
[0 if i == idx else None for i in range(len(params))],
vmap_over(d, **{param_names[idx]: 0}),
)
for idx in vmappable_param_idxs
if params[idx] is not None
),
# vmap over a single arg, axis=1, (out single attribute, axis=1)
*(
(
[1 if i == idx else None for i in range(len(params))],
vmap_over(d, **{param_names[idx]: 1}),
)
for idx in vmappable_param_idxs
if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0
            # skip this distribution because _GeneralMixture.__init__ turns
            # 1d inputs into 0d attributes, which breaks the expectations of
            # the vmapping test case where in_axes=1 (only applicable to rank>=1 tensors).
and jax_dist is not _GeneralMixture
),
]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [
jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)
if isinstance(ax, int)
else arg
for arg, ax in zip(params, in_axes)
]
# Recreate the jax_dist to avoid side effects coming from `d.sample`
# triggering lazy_property computations, which, in a few cases, break
# vmap_over's expectations regarding existing attributes to be vmapped.
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(
*batched_params
)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d
)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
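# Under jit, `total_count` is an abstract tracer; `total_count_max` supplies a
# concrete bound so that Multinomial sampling can still allocate a static shape.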
def test_multinomial_abstract_total_count():
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(
key
)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-6)
def test_normal_log_cdf():
    # test that the log_cdf method agrees with jax.scipy.stats.norm.logcdf
    # and that exp(log_cdf) agrees with cdf
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)
@pytest.mark.parametrize(
"value",
[
-15.0,
jnp.array([[-15.0], [-10.0], [-5.0]]),
jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),
],
)
def test_truncated_normal_log_prob_in_tail(value):
    # define a set of distributions truncated in the tail of the distribution
loc = 1.35
scale = jnp.geomspace(0.01, 1, 10)
low, high = (-20, -1.0)
a, b = (low - loc) / scale, (high - loc) / scale # rescale for jax input
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(
value
)
jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
    # test that samples from a distribution truncated in its tail
    # do not contain any infs
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
|
flexible
|
{
"blob_id": "c5e7fdcbd4a9281597a35a180f2853caac68f811",
"index": 7562,
"step-1": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\n<mask token>\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def 
__init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n<mask token>\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n 
super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n 
jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\n@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = 
jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\n@pytest.mark.parametrize('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\n@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, 
rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\n@pytest.mark.parametrize('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, 
total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), 
key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\n@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if 
isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\n@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\n@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 
3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])\n@pytest.mark.parametrize('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n 
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n 
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n<mask token>\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-2": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\n<mask token>\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n 
_component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), 
gate_logits, validate_args=\n validate_args)\n\n\n<mask token>\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == 
expected_event_shape\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\n@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 
1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\n@pytest.mark.parametrize('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\n@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), 
atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\n@pytest.mark.parametrize('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = 
dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, 
constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\n@pytest.mark.parametrize('shape', [(), 
(1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\n@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), 
SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\n@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])\n@pytest.mark.parametrize('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n 
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', 
[(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], 
[-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-3": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n 
component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n 
pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,\n rate=None, gate_logits=None):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,\n base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=\n gate_logits, gate=gate_logits)\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-06\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound\n )\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1])\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n elif constraint is constraints.corr_matrix:\n cholesky = 
signed_stick_breaking_tril(random.uniform(key, size[:-2] +\n (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, maxval=\n upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]\n ) + 0.01\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1]) + 1\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01\n elif constraint is constraints.corr_matrix:\n cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key, \n size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not 
implemented.'.format(constraint))\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n 
transf_dist.rsample(random.PRNGKey(0))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\n@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\n@pytest.mark.parametrize('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n 
RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\n@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if 'Improper' in jax_dist.__name__:\n pytest.skip('distribution has improper .log_prob()')\n if 'LKJ' in jax_dist.__name__:\n pytest.xfail('incorrect submanifold scaling')\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\n 'EulerMaruyama skip test when event shape 
is non-trivial.')\n num_samples = 10000\n if 'BetaProportion' in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if 'Dirichlet' in jax_dist.__name__:\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip('expensive test')\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n<mask token>\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(\n _tril_cholesky_to_tril_corr)(sample_tril))[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n corr_log_prob = np.array(corr_log_prob)\n assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],\n corr_log_prob.shape), rtol=1e-06)\n if dimension == 2:\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\n@pytest.mark.parametrize('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef 
test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_mean_var(jax_dist, sp_dist, params):\n if jax_dist is _ImproperWrapper:\n pytest.skip('Improper distribution does not has mean/var implemented')\n if jax_dist is FoldedNormal:\n pytest.skip('Folded 
distribution does not has mean/var implemented')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\n 'EulerMaruyama distribution does not has mean/var implemented')\n if jax_dist is dist.RelaxedBernoulliLogits:\n pytest.skip(\n 'RelaxedBernoulli distribution does not has mean/var implemented')\n if 'SineSkewed' in jax_dist.__name__:\n pytest.skip('Skewed Distribution are not symmetric about location.')\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.\n LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.\n TwoSidedTruncatedDistribution):\n pytest.skip('Truncated distributions do not has mean/var implemented')\n if jax_dist is dist.ProjectedNormal:\n pytest.skip('Mean is defined in submanifold')\n n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.\n SineBivariateVonMises] else 200000\n d_jax = jax_dist(*params)\n k = random.PRNGKey(0)\n samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [\n dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:\n d_sp = sp_dist(*params)\n try:\n sp_mean = d_sp.mean()\n except TypeError:\n sp_mean = d_sp.mean\n if d_jax.event_shape:\n try:\n sp_var = jnp.diag(d_sp.cov())\n except TypeError:\n sp_var = jnp.diag(d_sp.cov)\n except AttributeError:\n sp_var = d_sp.var()\n else:\n sp_var = d_sp.var()\n assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)\n assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)\n if jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n marginal = dist.Beta(concentration + 0.5 * (dimension - 2), \n concentration + 0.5 * (dimension - 2))\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.\n shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +\n d_jax.event_shape)\n expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape\n (marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.\n event_shape)\n expected_mean = expected_mean * (1 - jnp.identity(dimension)\n ) + jnp.identity(dimension)\n expected_std = expected_std * (1 - jnp.identity(dimension))\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol\n =0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01\n )\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = 200000,\n if len(d_jax.batch_shape) > 0:\n axes = [(len(sample_shape) + i) for i in range(len(d_jax.\n batch_shape))]\n axes = tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, 
jnp.arange(len(axes)))\n subshape = samples_re.shape[:len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in ixi:\n\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n ix_loc = get_min_shape(ix, d_jax.loc.shape[:len(ix)])\n jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax\n .mean[ix_loc]), rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order='F')\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:\n len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]\n )\n scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax\n .scale_tril_row[ix_row])\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01\n )\n else:\n jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),\n rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),\n order='F')\n scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.\n squeeze(d_jax.scale_tril_row))\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\n 'CAR distribution does not have `variance` implemented.')\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\n 'Gompertz distribution does not have `variance` implemented.')\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert 
jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n@pytest.mark.parametrize('constraint, x, expected', [(constraints.boolean,\n np.array([True, False]), np.array([True, True])), (constraints.boolean,\n np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.\n array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False\n ])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [\n 0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.\n array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),\n (constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, \n 0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3, \n True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([\n False, False, True])), (constraints.integer_interval(-3, 5), 0, True),\n (constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False])), (constraints.\n interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5, \n -3, 0, 5, 7]), 
np.array([False, True, True, True, False])), (\n constraints.less_than(1), -2, True), (constraints.less_than(1), np.\n array([-1, 1, 5]), np.array([True, False, False])), (constraints.\n lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (\n constraints.lower_cholesky, np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0,\n 0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.\n nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.\n array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.\n positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.\n array([False, False, True])), (constraints.positive_definite, np.array(\n [[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.\n array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([\n False, False])), (constraints.positive_integer, 3, True), (constraints.\n positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False, \n True])), (constraints.real, -1, True), (constraints.real, np.array([np.\n inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.\n simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False])), (constraints.softplus_positive, 3, \n True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([\n False, False, True])), (constraints.softplus_lower_cholesky, np.array([\n [1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.\n array([False, False])), (constraints.unit_interval, 0.1, True), (\n constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([\n False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,\n 0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.\n open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,\n False, True, False, False]))])\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected\n ), True))\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)\n\n\n@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\n@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == 
constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\n@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\n@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = 
batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2\n ) + jnp.log(2) * 9\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\n@pytest.mark.parametrize('prepend_event_shape', [(), (4,)])\n@pytest.mark.parametrize('sample_shape', [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape,\n sample_shape):\n base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +\n (6,)).to_event(1 + len(prepend_event_shape))\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n 
t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-05)\n\n\n@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.\n PowerTransform(2.0))\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(\n -2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('ts', [[transforms.PowerTransform(0.7), transforms\n .AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.\n ComposeTransform([transforms.AffineTransform(-2, 3), transforms.\n ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,\n hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.\n PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],\n rng_key=random.PRNGKey(1))]])\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\n@pytest.mark.parametrize('x_dim, y_dim', [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match='UnpackTransform.inv'):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n assert_allclose(t, xy)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n@pytest.mark.parametrize('jax_dist, params, support', [(dist.\n BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),\n jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist\n .BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0, \n 0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,\n 5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5, \n 0.4]),), jnp.arange(3))])\n@pytest.mark.parametrize('batch_shape', [(5,), ()])\n@pytest.mark.parametrize('expand', [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand\n ):\n p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n expected = support.reshape((-1,) + (1,) * len(batch_shape))\n if expand:\n expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n base_dist = dist.Bernoulli(0.5)\n d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n assert d.has_enumerate_support\n assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])\n@pytest.mark.parametrize('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n@pytest.mark.parametrize('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,\n 5)])\n@pytest.mark.parametrize('event_dim', [0, 1, 2, 3])\n@pytest.mark.parametrize('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n expand_shape = 2, 3, 5\n event_dim = min(event_dim, len(base_shape))\n loc = random.normal(random.PRNGKey(0), base_shape) * 10\n base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -\n event_dim])\n samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.\n shape())\n assert_allclose(samples.mean(0), 
expected_mean, atol=0.1)\n\n\n<mask token>\n\n\ndef test_sine_bivariate_von_mises_sample_mean():\n loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n samples = sine.sample(random.PRNGKey(0), (5000,))\n assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n rng_key = random.PRNGKey(0)\n x = jnp.linspace(1e-06, d.truncation_point, num_points)\n prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x\n ), axis=-1))\n assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)\n z = d.sample(rng_key, sample_shape=(3000,))\n mean = jnp.mean(z, axis=-1)\n assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\n@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n@pytest.mark.parametrize('batch_shape, mask_shape', [((), ()), ((2,), ()),\n ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])\n@pytest.mark.parametrize('event_shape', [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len\n (event_shape))\n mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n if mask_shape == ():\n mask = bool(mask)\n samples = jax_dist.sample(random.PRNGKey(1))\n actual = jax_dist.mask(mask).log_prob(samples)\n assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.\n broadcast_shapes(batch_shape, mask_shape)))\n\n\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n\n def f(x):\n return jax_dist(*params)\n if jax_dist is _ImproperWrapper:\n pytest.skip('Cannot flattening ImproperUniform')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n jax.jit(f)(0)\n lax.map(f, np.ones(3))\n expected_dist = f(0)\n actual_dist = jax.jit(f)(0)\n expected_sample = 
expected_dist.sample(random.PRNGKey(0))\n actual_sample = actual_dist.sample(random.PRNGKey(0))\n expected_log_prob = expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-06)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef 
test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = 
sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-4": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\ndef _circ_mean(angles):\n return jnp.arctan2(jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(\n angles), axis=0))\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return 
self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass 
ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,\n rate=None, gate_logits=None):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,\n base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=\n gate_logits, gate=gate_logits)\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-06\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound\n )\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1])\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * 
(size[-1] - 1) // 2,), minval=-1, maxval=1))\n elif constraint is constraints.corr_matrix:\n cholesky = signed_stick_breaking_tril(random.uniform(key, size[:-2] +\n (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, maxval=\n upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]\n ) + 0.01\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1]) + 1\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01\n elif constraint is constraints.corr_matrix:\n cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key, \n size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n 
return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with 
pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\n@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\n@pytest.mark.parametrize('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = 
(dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\n@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if 'Improper' in jax_dist.__name__:\n pytest.skip('distribution has improper .log_prob()')\n if 'LKJ' in jax_dist.__name__:\n pytest.xfail('incorrect submanifold scaling')\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\n 
'EulerMaruyama skip test when event shape is non-trivial.')\n num_samples = 10000\n if 'BetaProportion' in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if 'Dirichlet' in jax_dist.__name__:\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip('expensive test')\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n<mask token>\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(\n _tril_cholesky_to_tril_corr)(sample_tril))[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n corr_log_prob = np.array(corr_log_prob)\n assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],\n corr_log_prob.shape), rtol=1e-06)\n if dimension == 2:\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\n@pytest.mark.parametrize('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 
10])\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_mean_var(jax_dist, sp_dist, params):\n if jax_dist is _ImproperWrapper:\n pytest.skip('Improper distribution does not has 
mean/var implemented')\n if jax_dist is FoldedNormal:\n pytest.skip('Folded distribution does not has mean/var implemented')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\n 'EulerMaruyama distribution does not has mean/var implemented')\n if jax_dist is dist.RelaxedBernoulliLogits:\n pytest.skip(\n 'RelaxedBernoulli distribution does not has mean/var implemented')\n if 'SineSkewed' in jax_dist.__name__:\n pytest.skip('Skewed Distribution are not symmetric about location.')\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.\n LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.\n TwoSidedTruncatedDistribution):\n pytest.skip('Truncated distributions do not has mean/var implemented')\n if jax_dist is dist.ProjectedNormal:\n pytest.skip('Mean is defined in submanifold')\n n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.\n SineBivariateVonMises] else 200000\n d_jax = jax_dist(*params)\n k = random.PRNGKey(0)\n samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [\n dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:\n d_sp = sp_dist(*params)\n try:\n sp_mean = d_sp.mean()\n except TypeError:\n sp_mean = d_sp.mean\n if d_jax.event_shape:\n try:\n sp_var = jnp.diag(d_sp.cov())\n except TypeError:\n sp_var = jnp.diag(d_sp.cov)\n except AttributeError:\n sp_var = d_sp.var()\n else:\n sp_var = d_sp.var()\n assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)\n assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)\n if jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n marginal = dist.Beta(concentration + 0.5 * (dimension - 2), \n concentration + 0.5 * (dimension - 2))\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.\n shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +\n d_jax.event_shape)\n expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape\n (marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.\n event_shape)\n expected_mean = expected_mean * (1 - jnp.identity(dimension)\n ) + jnp.identity(dimension)\n expected_std = expected_std * (1 - jnp.identity(dimension))\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol\n =0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01\n )\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = 200000,\n if len(d_jax.batch_shape) > 0:\n axes = [(len(sample_shape) + i) for i in range(len(d_jax.\n batch_shape))]\n axes = 
tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n subshape = samples_re.shape[:len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in ixi:\n\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n ix_loc = get_min_shape(ix, d_jax.loc.shape[:len(ix)])\n jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax\n .mean[ix_loc]), rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order='F')\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:\n len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]\n )\n scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax\n .scale_tril_row[ix_row])\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01\n )\n else:\n jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),\n rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),\n order='F')\n scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.\n squeeze(d_jax.scale_tril_row))\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\n 'CAR distribution does not have `variance` implemented.')\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\n 'Gompertz distribution does not have `variance` implemented.')\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n 
valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n@pytest.mark.parametrize('constraint, x, expected', [(constraints.boolean,\n np.array([True, False]), np.array([True, True])), (constraints.boolean,\n np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.\n array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False\n ])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [\n 0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.\n array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),\n (constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, \n 0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3, \n True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([\n False, False, True])), (constraints.integer_interval(-3, 5), 0, True),\n (constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False])), (constraints.\n interval(-3, 5), 0, True), 
(constraints.interval(-3, 5), np.array([-5, \n -3, 0, 5, 7]), np.array([False, True, True, True, False])), (\n constraints.less_than(1), -2, True), (constraints.less_than(1), np.\n array([-1, 1, 5]), np.array([True, False, False])), (constraints.\n lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (\n constraints.lower_cholesky, np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0,\n 0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.\n nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.\n array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.\n positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.\n array([False, False, True])), (constraints.positive_definite, np.array(\n [[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.\n array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([\n False, False])), (constraints.positive_integer, 3, True), (constraints.\n positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False, \n True])), (constraints.real, -1, True), (constraints.real, np.array([np.\n inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.\n simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False])), (constraints.softplus_positive, 3, \n True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([\n False, False, True])), (constraints.softplus_lower_cholesky, np.array([\n [1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.\n array([False, False])), (constraints.unit_interval, 0.1, True), (\n constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([\n False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,\n 0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.\n open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,\n False, True, False, False]))])\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected\n ), True))\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)\n\n\n@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\n@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, 
constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\n@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\n@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef 
test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2\n ) + jnp.log(2) * 9\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\n@pytest.mark.parametrize('prepend_event_shape', [(), (4,)])\n@pytest.mark.parametrize('sample_shape', [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape,\n sample_shape):\n base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +\n (6,)).to_event(1 + len(prepend_event_shape))\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = 
d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-05)\n\n\n@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.\n PowerTransform(2.0))\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(\n -2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\ndef _make_iaf(input_dim, hidden_dims, rng_key):\n arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])\n _, init_params = arn_init(rng_key, (input_dim,))\n return InverseAutoregressiveTransform(partial(arn, init_params))\n\n\n@pytest.mark.parametrize('ts', [[transforms.PowerTransform(0.7), transforms\n .AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.\n ComposeTransform([transforms.AffineTransform(-2, 3), transforms.\n ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,\n hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.\n PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],\n rng_key=random.PRNGKey(1))]])\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\n@pytest.mark.parametrize('x_dim, y_dim', [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match='UnpackTransform.inv'):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n assert_allclose(t, xy)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n@pytest.mark.parametrize('jax_dist, params, support', [(dist.\n BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),\n jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist\n .BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0, \n 0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,\n 5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5, \n 0.4]),), jnp.arange(3))])\n@pytest.mark.parametrize('batch_shape', [(5,), ()])\n@pytest.mark.parametrize('expand', [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand\n ):\n p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n expected = support.reshape((-1,) + (1,) * len(batch_shape))\n if expand:\n expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n base_dist = dist.Bernoulli(0.5)\n d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n assert d.has_enumerate_support\n assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])\n@pytest.mark.parametrize('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n@pytest.mark.parametrize('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,\n 5)])\n@pytest.mark.parametrize('event_dim', [0, 1, 2, 3])\n@pytest.mark.parametrize('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n expand_shape = 2, 3, 5\n event_dim = min(event_dim, len(base_shape))\n loc = random.normal(random.PRNGKey(0), base_shape) * 10\n base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -\n event_dim])\n samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.\n shape())\n assert_allclose(samples.mean(0), 
expected_mean, atol=0.1)\n\n\n<mask token>\n\n\ndef test_sine_bivariate_von_mises_sample_mean():\n loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n samples = sine.sample(random.PRNGKey(0), (5000,))\n assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n rng_key = random.PRNGKey(0)\n x = jnp.linspace(1e-06, d.truncation_point, num_points)\n prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x\n ), axis=-1))\n assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)\n z = d.sample(rng_key, sample_shape=(3000,))\n mean = jnp.mean(z, axis=-1)\n assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\n@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n@pytest.mark.parametrize('batch_shape, mask_shape', [((), ()), ((2,), ()),\n ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])\n@pytest.mark.parametrize('event_shape', [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len\n (event_shape))\n mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n if mask_shape == ():\n mask = bool(mask)\n samples = jax_dist.sample(random.PRNGKey(1))\n actual = jax_dist.mask(mask).log_prob(samples)\n assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.\n broadcast_shapes(batch_shape, mask_shape)))\n\n\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n\n def f(x):\n return jax_dist(*params)\n if jax_dist is _ImproperWrapper:\n pytest.skip('Cannot flattening ImproperUniform')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n jax.jit(f)(0)\n lax.map(f, np.ones(3))\n expected_dist = f(0)\n actual_dist = jax.jit(f)(0)\n expected_sample = 
expected_dist.sample(random.PRNGKey(0))\n actual_sample = actual_dist.sample(random.PRNGKey(0))\n expected_log_prob = expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-06)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef 
test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = 
sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-5": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\nfrom functools import partial\nimport inspect\nfrom itertools import product\nimport math\nimport os\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport pytest\nimport scipy\nfrom scipy.sparse import csr_matrix\nimport scipy.stats as osp\n\nimport jax\nfrom jax import grad, lax, vmap\nimport jax.numpy as jnp\nimport jax.random as random\nfrom jax.scipy.special import expit, logsumexp\nfrom jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm\n\nimport numpyro.distributions as dist\nfrom numpyro.distributions import (\n SineBivariateVonMises,\n constraints,\n kl_divergence,\n transforms,\n)\nfrom numpyro.distributions.batch_util import vmap_over\nfrom numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom\nfrom numpyro.distributions.flows import InverseAutoregressiveTransform\nfrom numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit\nfrom numpyro.distributions.transforms import (\n LowerCholeskyAffine,\n PermuteTransform,\n PowerTransform,\n SimplexToOrderedTransform,\n SoftplusTransform,\n biject_to,\n)\nfrom numpyro.distributions.util import (\n matrix_to_tril_vec,\n multinomial,\n signed_stick_breaking_tril,\n sum_rightmost,\n vec_to_tril_matrix,\n)\nfrom numpyro.nn import AutoregressiveNN\n\nTEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\ndef _circ_mean(angles):\n return jnp.arctan2(\n jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)\n )\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\ndef sde_fn2(xy, _):\n tau, a = 2.0, 1.1\n x, y = xy[0], xy[1]\n dx = tau * (x - x**3.0 / 3.0 + y)\n dy = (1.0 / tau) * (a - x)\n dxy = jnp.vstack([dx, dy]).reshape(xy.shape)\n\n sigma2 = 0.1\n return dxy, sigma2\n\n\nclass T(namedtuple(\"TestCase\", [\"jax_dist\", \"sp_dist\", \"params\"])):\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < \"1.6.0\":\n pytest.skip(\n \"Multivariate Student-T distribution is not available in scipy < 1.6\"\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _truncnorm_to_scipy(loc, scale, low, high):\n if low is None:\n a = -np.inf\n else:\n a = (low - loc) / scale\n if high is None:\n b = np.inf\n else:\n b = (high - loc) / scale\n return osp.truncnorm(a, b, loc=loc, scale=scale)\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, 
high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n_TruncatedNormal.arg_constraints = {}\n_TruncatedNormal.reparametrized_params = []\n_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = (np.array([0.0]), np.array([1.0]))\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(\n self: SineSkewedVonMisesBatched, skewness=None\n):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(\n mixing_distribution=mixing_distribution,\n component_distribution=component_dist,\n )\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(\n self.component_distribution, loc=loc, scale=scale\n )\n return vmap_over.dispatch(dist.MixtureSameFamily)(\n self, _component_distribution=component_distribution\n )\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(\n loc=loc, covariance_matrix=covariance_matrix\n )\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(\n mixing_distribution=mixing_distribution,\n component_distribution=component_dist,\n )\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(\n self, _component_distribution=component_distribution\n )\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, 
mixing_probs, locs, scales):\n component_dists = [\n dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)\n ]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(\n mixing_distribution=mixing_distribution,\n component_distributions=component_dists,\n )\n\n @property\n def locs(self):\n # hotfix for vmapping tests, which cannot easily check non-array attributes\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [\n vmap_over(d, loc=locs, scale=scales) for d in self.component_distributions\n ]\n return vmap_over.dispatch(dist.MixtureGeneral)(\n self, _component_distributions=component_distributions\n )\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [\n dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)\n for loc_, covariance_matrix in zip(locs, covariance_matrices)\n ]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(\n mixing_distribution=mixing_distribution,\n component_distributions=component_dists,\n )\n\n @property\n def locs(self):\n # hotfix for vmapping tests, which cannot easily check non-array attributes\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [\n vmap_over(d, loc=locs) for d in self.component_distributions\n ]\n return vmap_over.dispatch(dist.MixtureGeneral)(\n self, _component_distributions=component_distributions\n )\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {\"rate\": constraints.positive, \"gate_logits\": constraints.real}\n pytree_data_fields = (\"rate\",)\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(\n self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None\n):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(\n self,\n base_dist=vmap_over(self.base_dist, rate=rate),\n gate_logits=gate_logits,\n gate=gate_logits,\n )\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {\"loc\": constraints.real, \"scale\": constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), 
validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: \"FoldedNormal\", loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(\n self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)\n )\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = [\"loc\", \"correlation\", \"conditional_precision\"]\n\n def __init__(\n self,\n loc,\n correlation,\n conditional_precision,\n adj_matrix,\n *,\n is_sparse=True,\n validate_args=None,\n ):\n super().__init__(\n loc,\n correlation,\n conditional_precision,\n adj_matrix,\n is_sparse=True,\n validate_args=validate_args,\n )\n\n\n_DIST_MAP = {\n dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(\n asymmetry, loc=loc, scale=scale\n ),\n dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),\n dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),\n dist.Beta: lambda con1, con0: osp.beta(con1, con0),\n dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),\n dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),\n dist.BinomialLogits: lambda logits, total_count: osp.binom(\n n=total_count, p=_to_probs_bernoulli(logits)\n ),\n dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),\n dist.Chi2: lambda df: osp.chi2(df),\n dist.Dirichlet: lambda conc: osp.dirichlet(conc),\n dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),\n dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),\n dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),\n dist.GeometricLogits: lambda logits: osp.geom(\n p=_to_probs_bernoulli(logits), loc=-1\n ),\n dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),\n dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),\n dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),\n dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),\n dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),\n dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),\n dist.LogUniform: lambda a, b: osp.loguniform(a, b),\n dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(\n n=total_count, p=probs\n ),\n dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(\n n=total_count, p=_to_probs_multinom(logits)\n ),\n dist.MultivariateNormal: _mvn_to_scipy,\n dist.MultivariateStudentT: _multivariate_t_to_scipy,\n dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,\n dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),\n dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),\n dist.Poisson: lambda rate: osp.poisson(rate),\n dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),\n dist.Uniform: lambda a, b: osp.uniform(a, b - a),\n dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),\n dist.VonMises: lambda loc, conc: osp.vonmises(\n loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)\n ),\n dist.Weibull: lambda scale, conc: osp.weibull_min(\n c=conc,\n scale=scale,\n ),\n _TruncatedNormal: _truncnorm_to_scipy,\n}\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\nCONTINUOUS = [\n T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),\n T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),\n 
T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),\n T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),\n T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),\n T(\n dist.AsymmetricLaplaceQuantile,\n np.array([[1.0], [2.0]]),\n 2.0,\n np.array([0.2, 0.8]),\n ),\n T(dist.Beta, 0.2, 1.1),\n T(dist.Beta, 1.0, np.array([2.0, 2.0])),\n T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),\n T(dist.BetaProportion, 0.2, 10.0),\n T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),\n T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),\n T(dist.Chi2, 2.0),\n T(dist.Chi2, np.array([0.3, 1.3])),\n T(dist.Cauchy, 0.0, 1.0),\n T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),\n T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.Dirichlet, np.array([1.7])),\n T(dist.Dirichlet, np.array([0.2, 1.1])),\n T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),\n T(\n dist.EulerMaruyama,\n np.array([0.0, 0.1, 0.2]),\n sde_fn1,\n dist.Normal(0.1, 1.0),\n ),\n T(\n dist.EulerMaruyama,\n np.array([0.0, 0.1, 0.2]),\n sde_fn2,\n dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),\n ),\n T(\n dist.EulerMaruyama,\n np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),\n sde_fn2,\n dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),\n ),\n T(\n dist.EulerMaruyama,\n np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),\n sde_fn2,\n dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),\n ),\n T(dist.Exponential, 2.0),\n T(dist.Exponential, np.array([4.0, 2.0])),\n T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.GaussianRandomWalk, 0.1, 10),\n T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),\n T(\n dist.GaussianCopulaBeta,\n np.array([7.0, 2.0]),\n np.array([4.0, 10.0]),\n np.array([[1.0, 0.75], [0.75, 1.0]]),\n ),\n T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),\n T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),\n T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.Gumbel, 0.0, 1.0),\n T(dist.Gumbel, 0.5, 2.0),\n T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),\n T(FoldedNormal, 2.0, 4.0),\n T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),\n T(dist.HalfCauchy, 1.0),\n T(dist.HalfCauchy, np.array([1.0, 2.0])),\n T(dist.HalfNormal, 1.0),\n T(dist.HalfNormal, np.array([1.0, 2.0])),\n T(_ImproperWrapper, constraints.positive, (), (3,)),\n T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),\n T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Kumaraswamy, 0.6, 0.5),\n T(dist.Laplace, 0.0, 1.0),\n T(dist.Laplace, 0.5, np.array([1.0, 2.5])),\n T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),\n T(dist.LKJ, 2, 0.5, \"onion\"),\n T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), \"cvine\"),\n T(dist.LKJCholesky, 2, 0.5, \"onion\"),\n T(dist.LKJCholesky, 2, 0.5, \"cvine\"),\n T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), \"onion\"),\n pytest.param(\n *T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), \"cvine\"),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n pytest.param(\n *T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), \"onion\"),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for 
CI\"),\n ),\n T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), \"cvine\"),\n T(dist.Logistic, 0.0, 1.0),\n T(dist.Logistic, 1.0, np.array([1.0, 2.0])),\n T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.LogNormal, 1.0, 0.2),\n T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),\n T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),\n T(dist.LogUniform, 1.0, 2.0),\n T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),\n T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(6).reshape(3, 2),\n np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),\n np.array([[1.0, 0], [0.4, 1]]),\n ),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(12).reshape((2, 3, 2)),\n np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),\n np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),\n ),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(36).reshape((2, 3, 3, 2)),\n np.identity(3),\n np.identity(2),\n ),\n T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),\n T(\n dist.MultivariateNormal,\n np.array([1.0, 3.0]),\n None,\n np.array([[1.0, 0.5], [0.5, 1.0]]),\n None,\n ),\n T(\n dist.MultivariateNormal,\n np.array([1.0, 3.0]),\n None,\n np.array([[[1.0, 0.5], [0.5, 1.0]]]),\n None,\n ),\n T(\n dist.MultivariateNormal,\n np.array([2.0]),\n None,\n None,\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateNormal,\n np.arange(6, dtype=np.float32).reshape((3, 2)),\n None,\n None,\n np.array([[1.0, 0.0], [0.0, 1.0]]),\n ),\n T(\n dist.MultivariateNormal,\n 0.0,\n None,\n np.broadcast_to(np.identity(3), (2, 3, 3)),\n None,\n ),\n T(\n dist.CAR,\n 1.2,\n np.array([-0.2, 0.3]),\n 0.1,\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n dist.CAR,\n np.array([0.0, 1.0, 3.0, 4.0]),\n 0.1,\n np.array([0.3, 0.7]),\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n _SparseCAR,\n np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),\n 0.0,\n 0.1,\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n 0.0,\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([1.0, 3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([1.0, 3.0]),\n np.array([[[1.0, 0.0], [0.5, 1.0]]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.arange(6, dtype=np.float32).reshape((3, 2)),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.ones(3),\n np.broadcast_to(np.identity(3), (2, 3, 3)),\n ),\n T(\n dist.MultivariateStudentT,\n np.array(7.0),\n np.array([1.0, 3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n np.arange(20, 22, dtype=jnp.float32),\n np.ones(3),\n np.broadcast_to(jnp.identity(3), (2, 3, 3)),\n ),\n T(\n dist.MultivariateStudentT,\n np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),\n np.ones(2),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.LowRankMultivariateNormal,\n np.zeros(2),\n np.array([[1.0], [0.0]]),\n np.array([1.0, 1.0]),\n ),\n T(\n dist.LowRankMultivariateNormal,\n np.arange(6, 
dtype=jnp.float32).reshape((2, 3)),\n np.arange(6, dtype=jnp.float32).reshape((3, 2)),\n np.array([1.0, 2.0, 3.0]),\n ),\n T(dist.Normal, 0.0, 1.0),\n T(dist.Normal, 1.0, np.array([1.0, 2.0])),\n T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.Pareto, 1.0, 2.0),\n T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),\n T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),\n T(dist.RelaxedBernoulliLogits, 2.0, -10.0),\n T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),\n T(dist.SoftLaplace, 1.0, 1.0),\n T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),\n T(dist.StudentT, 1.0, 1.0, 0.5),\n T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),\n T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),\n T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),\n T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),\n T(\n _TruncatedCauchy,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n np.array([-2.0, 2.0]),\n None,\n ),\n T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),\n T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),\n T(_TruncatedNormal, 0.0, 1.0, -1.0, None),\n T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),\n T(\n _TruncatedNormal,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n np.array([-2.0, 2.0]),\n None,\n ),\n T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),\n T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),\n T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),\n T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),\n T(\n _TruncatedNormal,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n None,\n np.array([-2.0, 2.0]),\n ),\n T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),\n T(dist.Uniform, 0.0, 2.0),\n T(dist.Uniform, 1.0, np.array([2.0, 3.0])),\n T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),\n T(dist.Weibull, 0.2, 1.1),\n T(dist.Weibull, 2.8, np.array([2.0, 2.0])),\n T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),\n T(\n _GaussianMixture,\n np.ones(3) / 3.0,\n np.array([0.0, 7.7, 2.1]),\n np.array([4.2, 7.7, 2.1]),\n ),\n T(\n _Gaussian2DMixture,\n np.array([0.2, 0.5, 0.3]),\n np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean\n np.array(\n [\n [\n [0.1, -0.2],\n [-0.2, 1.0],\n ],\n [\n [0.75, 0.0],\n [0.0, 0.75],\n ],\n [\n [1.0, 0.5],\n [0.5, 0.27],\n ],\n ]\n ), # Covariance\n ),\n T(\n _GeneralMixture,\n np.array([0.2, 0.3, 0.5]),\n np.array([0.0, 7.7, 2.1]),\n np.array([4.2, 1.7, 2.1]),\n ),\n T(\n _General2DMixture,\n np.array([0.2, 0.5, 0.3]),\n np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean\n np.array(\n [\n [\n [0.1, -0.2],\n [-0.2, 1.0],\n ],\n [\n [0.75, 0.0],\n [0.0, 0.75],\n ],\n [\n [1.0, 0.5],\n [0.5, 0.27],\n ],\n ]\n ), # Covariance\n ),\n]\n\nDIRECTIONAL = [\n T(dist.VonMises, 2.0, 10.0),\n T(dist.VonMises, 2.0, np.array([150.0, 10.0])),\n T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n 0.0,\n 0.0,\n 5.0,\n 6.0,\n 2.0,\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n T(\n dist.SineBivariateVonMises,\n 3.003,\n -1.343,\n 5.0,\n 6.0,\n 2.0,\n ),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n -1.232,\n -1.3430,\n 3.4,\n 2.0,\n 1.0,\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n np.array([math.pi - 0.2, 1.0]),\n np.array([0.0, 1.0]),\n 
np.array([5.0, 5.0]),\n np.array([7.0, 0.5]),\n None,\n np.array([0.5, 0.1]),\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n T(dist.ProjectedNormal, np.array([0.0, 0.0])),\n T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),\n T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),\n T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),\n T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),\n T(SineSkewedVonMises, np.array([0.342355])),\n T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),\n]\n\nDISCRETE = [\n T(dist.BetaBinomial, 2.0, 5.0, 10),\n T(\n dist.BetaBinomial,\n np.array([2.0, 4.0]),\n np.array([5.0, 3.0]),\n np.array([10, 12]),\n ),\n T(dist.BernoulliProbs, 0.2),\n T(dist.BernoulliProbs, np.array([0.2, 0.7])),\n T(dist.BernoulliLogits, np.array([-1.0, 3.0])),\n T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),\n T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),\n T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),\n T(dist.CategoricalProbs, np.array([1.0])),\n T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),\n T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),\n T(dist.CategoricalLogits, np.array([-5.0])),\n T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),\n T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),\n T(dist.Delta, 1),\n T(dist.Delta, np.array([0.0, 2.0])),\n T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),\n T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),\n T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),\n T(dist.GammaPoisson, 2.0, 2.0),\n T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),\n T(dist.GeometricProbs, 0.2),\n T(dist.GeometricProbs, np.array([0.2, 0.7])),\n T(dist.GeometricLogits, np.array([-1.0, 3.0])),\n T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),\n T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),\n T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),\n T(dist.NegativeBinomialProbs, 10, 0.2),\n T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),\n T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),\n T(\n dist.NegativeBinomialProbs,\n np.array([4.2, 10.7, 2.1]),\n np.array([0.2, 0.6, 0.5]),\n ),\n T(dist.NegativeBinomialLogits, 10, -2.1),\n T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),\n T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),\n T(\n dist.NegativeBinomialLogits,\n np.array([4.2, 7.7, 2.1]),\n np.array([4.2, 0.7, 2.1]),\n ),\n T(dist.NegativeBinomial2, 0.3, 10),\n T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),\n T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),\n T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),\n T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),\n T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),\n T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),\n T(dist.Poisson, 2.0),\n T(dist.Poisson, np.array([2.0, 3.0, 5.0])),\n T(SparsePoisson, 2.0),\n T(SparsePoisson, np.array([2.0, 3.0, 5.0])),\n T(SparsePoisson, 2),\n T(dist.ZeroInflatedPoisson, 0.6, 2.0),\n T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),\n T(ZeroInflatedPoissonLogits, 2.0, 3.0),\n T(\n ZeroInflatedPoissonLogits,\n np.array([0.2, 4.0, 0.3]),\n np.array([2.0, -3.0, 5.0]),\n ),\n]\n\n\ndef _is_batched_multivariate(jax_dist):\n 
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-6\n\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(\n key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]\n )\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n elif constraint is constraints.corr_matrix:\n cholesky = signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n\n else:\n raise NotImplementedError(\"{} not implemented.\".format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, 
maxval=upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return (\n multinomial(\n key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]\n )\n + 1\n )\n elif constraint is constraints.corr_cholesky:\n return (\n signed_stick_breaking_tril(\n random.uniform(\n key,\n size[:-2] + (size[-1] * (size[-1] - 1) // 2,),\n minval=-1,\n maxval=1,\n )\n )\n + 1e-2\n )\n elif constraint is constraints.corr_matrix:\n cholesky = 1e-2 + signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError(\"{} not implemented.\".format(constraint))\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\n@pytest.mark.parametrize(\"prepend_shape\", [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if (\n sp_dist\n and not _is_batched_multivariate(jax_dist)\n and not isinstance(jax_dist, dist.MultivariateStudentT)\n ):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif (\n sp_dist\n and not _is_batched_multivariate(jax_dist)\n and isinstance(jax_dist, dist.MultivariateStudentT)\n ):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = (1) if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(\n jax_dist.precision_matrix,\n jnp.linalg.inv(jax_dist.covariance_matrix),\n rtol=1e-6,\n )\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, \"shape\", ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = 
jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist).infer_shapes(\n *shapes\n )\n except NotImplementedError:\n pytest.skip(f\"{type(jax_dist).__name__}.infer_shapes() is not implemented\")\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n # we have pathwise gradient for gamma sampler\n gamma_derived_params = {\n \"Gamma\": [\"concentration\"],\n \"Beta\": [\"concentration1\", \"concentration0\"],\n \"BetaProportion\": [\"mean\", \"concentration\"],\n \"Chi2\": [\"df\"],\n \"Dirichlet\": [\"concentration\"],\n \"InverseGamma\": [\"concentration\"],\n \"LKJ\": [\"concentration\"],\n \"LKJCholesky\": [\"concentration\"],\n \"StudentT\": [\"df\"],\n }.get(jax_dist.__name__, [])\n\n dist_args = [\n p\n for p in (\n inspect.getfullargspec(jax_dist.__init__)[0][1:]\n if inspect.isclass(jax_dist)\n # account the the case jax_dist is a function\n else inspect.getfullargspec(jax_dist)[0]\n )\n ]\n params_dict = dict(zip(dist_args[: len(params)], params))\n\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [\n p for p in jax_class.reparametrized_params if p not in gamma_derived_params\n ]\n if not reparametrized_params:\n pytest.skip(\"{} not reparametrized.\".format(jax_class.__name__))\n\n nonrepara_params_dict = {\n k: v for k, v in params_dict.items() if k not in reparametrized_params\n }\n repara_params = tuple(\n v for k, v in params_dict.items() if k in reparametrized_params\n )\n\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(\n jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)\n )\n\n 
actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n\n eps = 1e-3\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]\n args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n # finite diff approximation\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, params\",\n [\n (dist.Gamma, (1.0,)),\n (dist.Gamma, (0.1,)),\n (dist.Gamma, (10.0,)),\n (dist.Chi2, (1.0,)),\n (dist.Chi2, (0.1,)),\n (dist.Chi2, (10.0,)),\n (dist.Beta, (1.0, 1.0)),\n (dist.StudentT, (5.0, 2.0, 4.0)),\n ],\n)\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z**2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean**2\n\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_jit_log_likelihood(jax_dist, sp_dist, params):\n if jax_dist.__name__ in (\n \"EulerMaruyama\",\n \"GaussianRandomWalk\",\n \"_ImproperWrapper\",\n \"LKJ\",\n \"LKJCholesky\",\n \"_SparseCAR\",\n ):\n pytest.xfail(reason=\"non-jittable params\")\n\n rng_key = random.PRNGKey(0)\n samples = jax_dist(*params).sample(key=rng_key, sample_shape=(2, 3))\n\n def log_likelihood(*params):\n return jax_dist(*params).log_prob(samples)\n\n expected = log_likelihood(*params)\n actual = jax.jit(log_likelihood)(*params)\n assert_allclose(actual, expected, atol=2e-5, rtol=2e-5)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\n@pytest.mark.parametrize(\"prepend_shape\", [(), (2,), (2, 3)])\n@pytest.mark.parametrize(\"jit\", [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (\n dist.LeftTruncatedDistribution,\n dist.RightTruncatedDistribution,\n dist.TwoSidedTruncatedDistribution,\n )\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n # new api\n loc, scale, low, high = (\n params[0].loc,\n params[0].scale,\n params[1],\n params[2],\n )\n else:\n # old api\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(\n sp_dist.cdf(high) - sp_dist.cdf(low)\n )\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)\n return\n pytest.skip(\"no corresponding scipy distn.\")\n if _is_batched_multivariate(jax_dist):\n pytest.skip(\"batching not allowed in multivariate distns.\")\n if jax_dist.event_shape and prepend_shape:\n # >>> d = sp.dirichlet([1.1, 1.1])\n # >>> samples = d.rvs(size=(2,))\n # >>> d.logpdf(samples)\n # ValueError: The 
input vector 'x' must lie within the normal simplex ...\n pytest.skip(\"batched samples cannot be scored by multivariate distributions.\")\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n # precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e):\n samples = jax.device_get(samples).astype(\"float64\")\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(\n dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])\n )\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\",\n # TODO: add more complete pattern for Discrete.cdf\n CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],\n)\n@pytest.mark.filterwarnings(\"ignore:overflow encountered:RuntimeWarning\")\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\"skip testing cdf/icdf methods of multivariate distributions\")\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5\n if d.shape() == () and not d.is_discrete:\n assert_allclose(\n jax.vmap(jax.grad(d.cdf))(samples),\n jnp.exp(d.log_prob(samples)),\n atol=1e-5,\n rtol=rtol,\n )\n assert_allclose(\n jax.vmap(jax.grad(d.icdf))(quantiles),\n jnp.exp(-d.log_prob(d.icdf(quantiles))),\n atol=1e-5,\n rtol=rtol,\n )\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)\n except NotImplementedError:\n pass\n\n # test against scipy\n if not sp_dist:\n pytest.skip(\"no corresponding scipy distn.\")\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)\n except NotImplementedError:\n pass\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if \"Improper\" in jax_dist.__name__:\n pytest.skip(\"distribution has improper .log_prob()\")\n if \"LKJ\" in jax_dist.__name__:\n pytest.xfail(\"incorrect submanifold scaling\")\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\"EulerMaruyama skip test when event shape is non-trivial.\")\n\n num_samples = 10000\n if \"BetaProportion\" in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n\n # Test each batch independently.\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if \"Dirichlet\" in jax_dist.__name__:\n # The Dirichlet density is over all but one of the 
probs.\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip(\"expensive test\")\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE)\ndef test_independent_shape(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n batch_shape, event_shape = d.batch_shape, d.event_shape\n shape = batch_shape + event_shape\n for i in range(len(batch_shape)):\n indep = dist.Independent(d, reinterpreted_batch_ndims=i)\n sample = indep.sample(random.PRNGKey(0))\n event_boundary = len(shape) - len(event_shape) - i\n assert indep.batch_shape == shape[:event_boundary]\n assert indep.event_shape == shape[event_boundary:]\n assert jnp.shape(indep.log_prob(sample)) == shape[:event_boundary]\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w**2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\n@pytest.mark.parametrize(\"dimension\", [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n # When concentration=1, the distribution of correlation matrices is uniform.\n # We will test that fact here.\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(\n jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)\n )[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n\n corr_log_prob = np.array(corr_log_prob)\n # test if they are constant\n assert_allclose(\n corr_log_prob,\n jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),\n rtol=1e-6,\n )\n\n if dimension == 2:\n # when concentration = 1, LKJ gives a uniform distribution over correlation matrix,\n # hence for the case dimension = 2,\n # density of a correlation matrix will be Uniform(-1, 1) = 0.5.\n # In addition, jacobian of the transformation from cholesky -> corr is 1 (hence its\n # log value is 0) because the off-diagonal lower triangular element does not change\n # in the transform.\n # So target_log_prob = log(0.5)\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)\n\n\n@pytest.mark.parametrize(\"dimension\", [2, 3, 5])\n@pytest.mark.parametrize(\"concentration\", [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n # We will test against the fact that LKJCorrCholesky can be seen as a\n # TransformedDistribution with base distribution is a distribution of partial\n # correlations in C-vine method (modulo an affine transform to change domain from (0, 1)\n # to (1, 0)) and transform is a signed stick-breaking process.\n d = dist.LKJCholesky(dimension, concentration, sample_method=\"cvine\")\n\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n\n # compute signed stick breaking logdet\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2 # noqa: E731\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = 
biject_to(constraints.corr_cholesky).log_abs_det_jacobian(\n unconstrained, sample\n )\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)\n\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(\n 0,\n 20,\n (\n 1000,\n 100,\n ),\n )\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n@pytest.mark.parametrize(\"rate\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\ndef test_ZIP_log_prob(rate):\n # if gate is 0 ZIP is Poisson\n zip_ = dist.ZeroInflatedPoisson(0.0, rate)\n pois = dist.Poisson(rate)\n s = zip_.sample(random.PRNGKey(0), (20,))\n zip_prob = zip_.log_prob(s)\n pois_prob = pois.log_prob(s)\n assert_allclose(zip_prob, pois_prob, rtol=1e-6)\n\n # if gate is 1 ZIP is Delta(0)\n zip_ = dist.ZeroInflatedPoisson(1.0, rate)\n delta = dist.Delta(0.0)\n s = np.array([0.0, 1.0])\n zip_prob = zip_.log_prob(s)\n delta_prob = delta.log_prob(s)\n assert_allclose(zip_prob, delta_prob, rtol=1e-6)\n\n\n@pytest.mark.parametrize(\"total_count\", [1, 2, 3, 10])\n@pytest.mark.parametrize(\"shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n\n actual = dist.BetaBinomial(concentration1, concentration0, total_count).log_prob(\n value\n )\n assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize(\"total_count\", [1, 2, 3, 10])\n@pytest.mark.parametrize(\"batch_shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = (3,)\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n # test on one-hots\n value = total_count * jnp.eye(event_shape[-1]).reshape(\n event_shape + (1,) * len(batch_shape) + event_shape\n )\n\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize(\"shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n\n num_samples = 300000\n poisson_rate = np.random.gamma(\n gamma_conc, 1 / gamma_rate, size=(num_samples,) + shape\n )\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n 
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip(\"we have separated tests for LKJCholesky distribution\")\n if jax_dist is _ImproperWrapper:\n pytest.skip(\"no param for ImproperUniform to test for log_prob gradient\")\n\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n\n eps = 1e-3\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n # skip taking grad w.r.t. sde_fn\n continue\n if jax_dist is _SparseCAR and i == 3:\n # skip taking grad w.r.t. adj_matrix\n continue\n if isinstance(\n params[i], dist.Distribution\n ): # skip taking grad w.r.t. base_dist\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]\n args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n # finite diff approximation\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n # grad w.r.t. `value` of Delta distribution will be 0\n # but numerical value will give nan (= inf - inf)\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_mean_var(jax_dist, sp_dist, params):\n if jax_dist is _ImproperWrapper:\n pytest.skip(\"Improper distribution does not has mean/var implemented\")\n if jax_dist is FoldedNormal:\n pytest.skip(\"Folded distribution does not has mean/var implemented\")\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama distribution does not has mean/var implemented\")\n if jax_dist is dist.RelaxedBernoulliLogits:\n pytest.skip(\"RelaxedBernoulli distribution does not has mean/var implemented\")\n if \"SineSkewed\" in jax_dist.__name__:\n pytest.skip(\"Skewed Distribution are not symmetric about location.\")\n if jax_dist in (\n _TruncatedNormal,\n _TruncatedCauchy,\n dist.LeftTruncatedDistribution,\n dist.RightTruncatedDistribution,\n dist.TwoSidedTruncatedDistribution,\n ):\n pytest.skip(\"Truncated distributions do not has mean/var implemented\")\n if jax_dist is dist.ProjectedNormal:\n pytest.skip(\"Mean is defined in submanifold\")\n\n n = (\n 20000\n if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]\n else 200000\n )\n d_jax = jax_dist(*params)\n k = random.PRNGKey(0)\n samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n # check with suitable scipy implementation if available\n # XXX: VonMises is already tested below\n if (\n sp_dist\n and not _is_batched_multivariate(d_jax)\n and jax_dist\n not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]\n ):\n d_sp = sp_dist(*params)\n try:\n sp_mean = d_sp.mean()\n except TypeError: # mvn does not have .mean() method\n sp_mean = d_sp.mean\n # for multivariate distns try .cov first\n if d_jax.event_shape:\n try:\n sp_var = jnp.diag(d_sp.cov())\n except TypeError: # mvn does not have .cov() method\n sp_var = jnp.diag(d_sp.cov)\n 
except AttributeError:\n sp_var = d_sp.var()\n else:\n sp_var = d_sp.var()\n assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)\n assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)\n if jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(\n jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2\n )\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n # marginal of off-diagonal entries\n marginal = dist.Beta(\n concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)\n )\n # scale statistics due to linear mapping\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(\n jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),\n jnp.shape(marginal_mean) + d_jax.event_shape,\n )\n expected_std = jnp.broadcast_to(\n jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),\n jnp.shape(marginal_std) + d_jax.event_shape,\n )\n # diagonal elements of correlation matrices are 1\n expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(\n dimension\n )\n expected_std = expected_std * (1 - jnp.identity(dimension))\n\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n # circular mean = sample mean\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)\n\n # circular variance\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n\n expected_variance = 1 - jnp.sqrt(x**2 + y**2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n\n assert_allclose(\n d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2\n )\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = (200_000,)\n # use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))\n if len(d_jax.batch_shape) > 0:\n axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]\n axes = tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n subshape = samples_re.shape[: len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in ixi:\n # mean\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n\n ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])\n jnp.allclose(\n jnp.mean(samples_re[ix], 0),\n jnp.squeeze(d_jax.mean[ix_loc]),\n rtol=0.5,\n atol=1e-2,\n )\n # cov\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order=\"F\"\n )\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])\n scale_tril = my_kron(\n d_jax.scale_tril_column[ix_col],\n d_jax.scale_tril_row[ix_row],\n )\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)\n else: # unbatched\n # mean\n jnp.allclose(\n jnp.mean(samples, 0),\n jnp.squeeze(d_jax.mean),\n rtol=0.5,\n atol=1e-2,\n )\n # cov\n samples_mvn = 
jnp.squeeze(samples).reshape(sample_shape + (-1,), order=\"F\")\n scale_tril = my_kron(\n jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)\n )\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\"CAR distribution does not have `variance` implemented.\")\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\"Gompertz distribution does not have `variance` implemented.\")\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(\n jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2\n )\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\n@pytest.mark.parametrize(\"prepend_shape\", [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (\n _TruncatedNormal,\n _TruncatedCauchy,\n _GaussianMixture,\n _Gaussian2DMixture,\n _GeneralMixture,\n _General2DMixture,\n ):\n pytest.skip(f\"{jax_dist.__name__} is a function, not a class\")\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if (\n jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)\n and dist_args[i] != \"concentration\"\n ):\n continue\n if \"SineSkewed\" in jax_dist.__name__ and dist_args[i] != \"skewness\":\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != \"t\":\n continue\n if (\n jax_dist is dist.TwoSidedTruncatedDistribution\n and dist_args[i] == \"base_dist\"\n ):\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == \"num_steps\":\n continue\n if (\n jax_dist is dist.SineBivariateVonMises\n and dist_args[i] == \"weighted_correlation\"\n ):\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(\n constraint, jnp.shape(params[i]), key_gen\n )\n valid_params[i] = gen_values_within_bounds(\n constraint, jnp.shape(params[i]), key_gen\n )\n if jax_dist is dist.MultivariateStudentT:\n # As mean is only defined for df > 1 & we instantiate\n # scipy.stats.multivariate_t with same mean as jax_dist\n # we need to ensure this is defined, so force df >= 1\n valid_params[0] += 1\n\n if jax_dist is dist.LogUniform:\n # scipy.stats.loguniform take parameter a and b\n # which is a > 0 and b > a.\n # gen_values_within_bounds() generates just\n # a > 0 and b > 0. 
Then, make b = a + b.\n valid_params[1] += valid_params[0]\n\n assert jax_dist(*oob_params)\n\n # Invalid parameter values throw ValueError\n if not dependent_constraint and (\n jax_dist is not _ImproperWrapper and \"SineSkewed\" not in jax_dist.__name__\n ):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n\n with pytest.raises(ValueError):\n # test error raised under jit omnistaging\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n\n jax.jit(dist_gen_fn)()\n\n d = jax_dist(*valid_params, validate_args=True)\n\n # Test agreement of log density evaluation on randomly generated samples\n # with scipy's implementation when available.\n if (\n sp_dist\n and not _is_batched_multivariate(d)\n and not (d.event_shape and prepend_shape)\n ):\n valid_samples = gen_values_within_bounds(\n d.support, size=prepend_shape + d.batch_shape + d.event_shape\n )\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)\n\n # Out of support samples throw ValueError\n oob_samples = gen_values_outside_bounds(\n d.support, size=prepend_shape + d.batch_shape + d.event_shape\n )\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n d.log_prob(oob_samples)\n\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n # test warning work under jit omnistaging\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n\n with pytest.raises(ValueError, match=\"got invalid\"):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return (\n dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(data).sum()\n )\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()\n\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-6)\n assert_allclose(grad_fx, grad_gx, atol=1e-4)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(\n ValueError, match=r\"^BetaProportion distribution got invalid mean parameter\\.$\"\n ):\n dist.BetaProportion(1.0, 1.0)\n\n\n########################################\n# Tests for constraints and transforms #\n########################################\n\n\n@pytest.mark.parametrize(\n \"constraint, x, expected\",\n [\n (constraints.boolean, np.array([True, False]), np.array([True, True])),\n (constraints.boolean, np.array([1, 1]), np.array([True, True])),\n (constraints.boolean, np.array([-1, 1]), np.array([False, True])),\n (\n constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),\n np.array([True, False]),\n ), # NB: not lower_triangular\n (\n constraints.corr_cholesky,\n np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),\n np.array([False, False]),\n ), # NB: not positive_diagonal & 
not unit_norm_row\n (\n constraints.corr_matrix,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),\n np.array([True, False]),\n ), # NB: not lower_triangular\n (\n constraints.corr_matrix,\n np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),\n np.array([False, False]),\n ), # NB: not unit diagonal\n (constraints.greater_than(1), 3, True),\n (\n constraints.greater_than(1),\n np.array([-1, 1, 5]),\n np.array([False, False, True]),\n ),\n (constraints.integer_interval(-3, 5), 0, True),\n (\n constraints.integer_interval(-3, 5),\n np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False]),\n ),\n (constraints.interval(-3, 5), 0, True),\n (\n constraints.interval(-3, 5),\n np.array([-5, -3, 0, 5, 7]),\n np.array([False, True, True, True, False]),\n ),\n (constraints.less_than(1), -2, True),\n (\n constraints.less_than(1),\n np.array([-1, 1, 5]),\n np.array([True, False, False]),\n ),\n (constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),\n (\n constraints.lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),\n np.array([False, False]),\n ),\n (constraints.nonnegative_integer, 3, True),\n (\n constraints.nonnegative_integer,\n np.array([-1.0, 0.0, 5.0]),\n np.array([False, True, True]),\n ),\n (constraints.positive, 3, True),\n (constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),\n (constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),\n (\n constraints.positive_definite,\n np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),\n np.array([False, False]),\n ),\n (constraints.positive_integer, 3, True),\n (\n constraints.positive_integer,\n np.array([-1.0, 0.0, 5.0]),\n np.array([False, False, True]),\n ),\n (constraints.real, -1, True),\n (\n constraints.real,\n np.array([np.inf, -np.inf, np.nan, np.pi]),\n np.array([False, False, False, True]),\n ),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True),\n (\n constraints.simplex,\n np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False]),\n ),\n (constraints.softplus_positive, 3, True),\n (\n constraints.softplus_positive,\n np.array([-1, 0, 5]),\n np.array([False, False, True]),\n ),\n (\n constraints.softplus_lower_cholesky,\n np.array([[1.0, 0.0], [-2.0, 0.1]]),\n True,\n ),\n (\n constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),\n np.array([False, False]),\n ),\n (constraints.unit_interval, 0.1, True),\n (\n constraints.unit_interval,\n np.array([-5, 0, 0.5, 1, 7]),\n np.array([False, True, True, True, False]),\n ),\n (\n constraints.sphere,\n np.array([[1, 0, 0], [0.5, 0.5, 0]]),\n np.array([True, False]),\n ),\n (\n constraints.open_interval(0.0, 1.0),\n np.array([-5, 0, 0.5, 1, 7]),\n np.array([False, False, True, False, False]),\n ),\n ],\n)\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == \"float32\" or jnp.result_type(v) == \"float64\":\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))\n\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)\n\n\n@pytest.mark.parametrize(\n \"constraint\",\n [\n constraints.corr_cholesky,\n 
constraints.corr_matrix,\n constraints.greater_than(2),\n constraints.interval(-3, 5),\n constraints.l1_ball,\n constraints.less_than(1),\n constraints.lower_cholesky,\n constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector,\n constraints.positive,\n constraints.positive_definite,\n constraints.positive_ordered_vector,\n constraints.real,\n constraints.real_vector,\n constraints.simplex,\n constraints.softplus_positive,\n constraints.softplus_lower_cholesky,\n constraints.unit_interval,\n constraints.open_interval(0.0, 1.0),\n ],\n ids=lambda x: x.__class__,\n)\n@pytest.mark.parametrize(\"shape\", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n\n # test inv work for NaN arrays:\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n\n # test codomain\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))\n\n # test inv\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-5, rtol=1e-5)\n\n # test domain, currently all is constraints.real or constraints.real_vector\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n\n # test log_abs_det_jacobian\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]\n elif constraint in [\n constraints.real_vector,\n constraints.ordered_vector,\n constraints.positive_ordered_vector,\n constraints.l1_ball,\n ]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:\n vec_transform = lambda x: matrix_to_tril_vec( # noqa: E731\n transform(x), diagonal=-1\n )\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n # fill the upper triangular part\n matrix = (\n matrix\n + jnp.swapaxes(matrix, -2, -1)\n + jnp.identity(matrix.shape[-1])\n )\n return transform.inv(matrix)\n\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]\n elif constraint in [\n constraints.lower_cholesky,\n constraints.scaled_unit_lower_cholesky,\n constraints.positive_definite,\n constraints.softplus_lower_cholesky,\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x)) # noqa: E731\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = 
vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n # fill the upper triangular part\n matrix = (\n matrix\n + jnp.swapaxes(matrix, -2, -1)\n - jnp.diag(jnp.diag(matrix))\n )\n return transform.inv(matrix)\n\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n\n assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)\n assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)\n\n\n# NB: skip transforms which are tested in `test_biject_to`\n@pytest.mark.parametrize(\n \"transform, event_shape\",\n [\n (PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),\n (PowerTransform(2.0), ()),\n (SoftplusTransform(), ()),\n (\n LowerCholeskyAffine(\n np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])\n ),\n (2,),\n ),\n (\n transforms.ComposeTransform(\n [\n biject_to(constraints.simplex),\n SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv,\n ]\n ),\n (5,),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"batch_shape\",\n [\n (),\n (1,),\n (3,),\n (6,),\n (3, 1),\n (1, 3),\n (5, 3),\n ],\n)\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n\n # test codomain\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n\n # test inv\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-6, rtol=1e-4)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n\n # test domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n\n # test log_abs_det_jacobian\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n\n assert_allclose(actual, expected, atol=1e-6)\n assert_allclose(actual, -inv_expected, atol=1e-6)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = (\n jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9\n )\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == 
batch_shape\n z = t2(x * 2)\n expected_log_det = (\n jnp.log(2) * 6\n + t2.log_abs_det_jacobian(x * 2, z)\n + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n )\n assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (5,)])\n@pytest.mark.parametrize(\"prepend_event_shape\", [(), (4,)])\n@pytest.mark.parametrize(\"sample_shape\", [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):\n base_dist = (\n dist.Normal(0, 1)\n .expand(batch_shape + prepend_event_shape + (6,))\n .to_event(1 + len(prepend_event_shape))\n )\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-5)\n\n\n@pytest.mark.parametrize(\n \"transformed_dist\",\n [\n dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()\n ),\n dist.TransformedDistribution(\n dist.Exponential(jnp.ones(2)),\n [\n transforms.PowerTransform(0.7),\n transforms.AffineTransform(0.0, jnp.ones(2) * 3),\n ],\n ),\n ],\n)\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(\n random.PRNGKey(1)\n )\n assert_allclose(\n transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample),\n )\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(\n dist.Normal(2, 3), transforms.PowerTransform(2.0)\n )\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\ndef _make_iaf(input_dim, hidden_dims, rng_key):\n arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])\n _, init_params = arn_init(rng_key, (input_dim,))\n return InverseAutoregressiveTransform(partial(arn, init_params))\n\n\n@pytest.mark.parametrize(\n \"ts\",\n [\n [transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],\n [transforms.ExpTransform()],\n [\n transforms.ComposeTransform(\n [transforms.AffineTransform(-2, 3), transforms.ExpTransform()]\n ),\n transforms.PowerTransform(3.0),\n ],\n [\n _make_iaf(5, hidden_dims=[10], 
rng_key=random.PRNGKey(0)),\n transforms.PermuteTransform(jnp.arange(5)[::-1]),\n _make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(1)),\n ],\n ],\n)\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\n@pytest.mark.parametrize(\"x_dim, y_dim\", [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {\"x\": xy[:x_dim], \"y\": xy[x_dim:]} # noqa: E731\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match=\"UnpackTransform.inv\"):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n\n assert_allclose(t, xy)\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS)\ndef test_generated_sample_distribution(\n jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)\n):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n \"{} sampling method taken from upstream, no need to\"\n \"test generated samples.\".format(jax_dist.__name__)\n )\n\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n@pytest.mark.parametrize(\n \"jax_dist, params, support\",\n [\n (dist.BernoulliLogits, (5.0,), jnp.arange(2)),\n (dist.BernoulliProbs, (0.5,), jnp.arange(2)),\n (dist.BinomialLogits, (4.5, 10), jnp.arange(11)),\n (dist.BinomialProbs, (0.5, 11), jnp.arange(12)),\n (dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),\n (dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),\n (dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),\n ],\n)\n@pytest.mark.parametrize(\"batch_shape\", [(5,), ()])\n@pytest.mark.parametrize(\"expand\", [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):\n p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n expected = support.reshape((-1,) + (1,) * len(batch_shape))\n if expand:\n expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n base_dist = dist.Bernoulli(0.5)\n d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n assert d.has_enumerate_support\n assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize(\"prepend_shape\", [(), (2, 3)])\n@pytest.mark.parametrize(\"sample_shape\", [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = 
expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape\n # test expand of expand\n assert (\n expanded_dist.expand((3,) + new_batch_shape).batch_shape\n == (3,) + new_batch_shape\n )\n # test expand error\n if prepend_shape:\n with pytest.raises(ValueError, match=\"Cannot broadcast distribution of shape\"):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n@pytest.mark.parametrize(\"base_shape\", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])\n@pytest.mark.parametrize(\"event_dim\", [0, 1, 2, 3])\n@pytest.mark.parametrize(\"sample_shape\", [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n expand_shape = (2, 3, 5)\n event_dim = min(event_dim, len(base_shape))\n loc = random.normal(random.PRNGKey(0), base_shape) * 10\n base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])\n samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())\n assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (10, 3)])\ndef test_sine_bivariate_von_mises_batch_shape(batch_shape):\n phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)\n psi_loc = jnp.array(0.0)\n phi_conc = jnp.array(1.0)\n psi_conc = jnp.array(1.0)\n corr = jnp.array(0.1)\n\n sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)\n assert sine.batch_shape == batch_shape\n\n samples = sine.sample(random.PRNGKey(0))\n assert samples.shape == (*batch_shape, 2)\n\n\ndef test_sine_bivariate_von_mises_sample_mean():\n loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n\n sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n samples = sine.sample(random.PRNGKey(0), (5000,))\n\n assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n rng_key = random.PRNGKey(0)\n\n # test density approximately normalized\n x = jnp.linspace(1.0e-6, d.truncation_point, num_points)\n prob = (d.truncation_point / num_points) * jnp.exp(\n logsumexp(d.log_prob(x), axis=-1)\n )\n assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)\n\n # test mean of approximate sampler\n z = d.sample(rng_key, sample_shape=(3000,))\n mean = jnp.mean(z, axis=-1)\n assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\n@pytest.mark.parametrize(\n \"extra_event_dims,expand_shape\",\n [(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],\n)\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = (4, 1, 1, 1, 6)\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n\n # Throws error when batch shape cannot be 
broadcasted\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n\n # Throws error when trying to shrink existing batch shape\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n@pytest.mark.parametrize(\n \"batch_shape, mask_shape\",\n [((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],\n)\n@pytest.mark.parametrize(\"event_shape\", [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n jax_dist = (\n dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))\n )\n mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n if mask_shape == ():\n mask = bool(mask)\n samples = jax_dist.sample(random.PRNGKey(1))\n actual = jax_dist.mask(mask).log_prob(samples)\n assert_allclose(\n actual != 0,\n jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),\n )\n\n\n@pytest.mark.parametrize(\"event_shape\", [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(\n jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))\n )\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]\n return log_prob.sum()\n\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n def f(x):\n return jax_dist(*params)\n\n if jax_dist is _ImproperWrapper:\n pytest.skip(\"Cannot flattening ImproperUniform\")\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n jax.jit(f)(0) # this test for flatten/unflatten\n lax.map(f, np.ones(3)) # this test for compatibility w.r.t. scan\n # Test that parameters do not change after flattening.\n expected_dist = f(0)\n actual_dist = jax.jit(f)(0)\n expected_sample = expected_dist.sample(random.PRNGKey(0))\n actual_sample = actual_dist.sample(random.PRNGKey(0))\n expected_log_prob = expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-6)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)\n\n\n@pytest.mark.parametrize(\n \"method, arg\", [(\"to_event\", 1), (\"mask\", False), (\"expand\", [5])]\n)\ndef test_special_dist_pytree(method, arg):\n def f(x):\n d = dist.Normal(np.zeros(1), np.ones(1))\n return getattr(d, method)(arg)\n\n jax.jit(f)(0)\n lax.map(f, np.ones(3))\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n # ExpandedDistribution can mutate the `batch_shape` of\n # its base distribution in order to make ExpandedDistribution\n # mappable, see #684. 
However, this mutation should not take\n # place if no mapping operation is performed.\n\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n # Low level test: ensure that (tree_flatten o tree_unflatten)(expanded_dist)\n # amounts to an identity operation.\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n # High-level test: `jax.jit`ting a function returning an ExpandedDistribution\n # (which involves an instance of the low-level case as it will transform\n # the original function by adding some flattening and unflattening steps)\n # should return same object as its non-jitted equivalent.\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize(\"event_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(\n dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))\n )\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize(\"event_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)\n actual = kl_divergence(\n dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))\n )\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize(\"shape\", [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize(\n \"p_dist, q_dist\",\n [\n (dist.Beta, dist.Beta),\n (dist.Gamma, dist.Gamma),\n 
(dist.Kumaraswamy, dist.Beta),\n (dist.Normal, dist.Normal),\n (dist.Weibull, dist.Gamma),\n ],\n)\ndef test_kl_univariate(shape, p_dist, q_dist):\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f\"Missing pattern for param {k}.\")\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize(\"shape\", [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10_000,)).copy()\n expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n # test that vmapped binomial with p = 0 does not have an infinite loop\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\ndef _get_vmappable_dist_init_params(jax_dist):\n if jax_dist.__name__ == (\"_TruncatedCauchy\"):\n return [2, 3]\n elif jax_dist.__name__ == (\"_TruncatedNormal\"):\n return [2, 3]\n elif issubclass(jax_dist, dist.Distribution):\n init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[\n 1:\n ]\n vmap_over_parameters = list(\n inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()\n )[1:]\n return list(\n [\n i\n for i, name in enumerate(init_parameters)\n if name in vmap_over_parameters\n ]\n )\n else:\n raise ValueError\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize(\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[: len(params)]\n\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n\n d = make_jax_dist(*params)\n\n if isinstance(d, _SparseCAR) and d.is_sparse:\n # In this case, since csr arrays are not jittable,\n # _SparseCAR has a csr_matrix as part of its pytree\n # definition (not as a pytree leaf). 
This causes pytree\n # operations like tree_map to fail, since these functions\n # compare the pytree def of each of the arguments using ==\n # which is ambiguous for array-like objects.\n return\n\n in_out_axes_cases = [\n # vmap over all args\n (\n tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),\n 0,\n ),\n # vmap over a single arg, out over all attributes of a distribution\n *(\n ([0 if i == idx else None for i in range(len(params))], 0)\n for idx in vmappable_param_idxs\n if params[idx] is not None\n ),\n # vmap over a single arg, out over the associated attribute of the distribution\n *(\n (\n [0 if i == idx else None for i in range(len(params))],\n vmap_over(d, **{param_names[idx]: 0}),\n )\n for idx in vmappable_param_idxs\n if params[idx] is not None\n ),\n # vmap over a single arg, axis=1, (out single attribute, axis=1)\n *(\n (\n [1 if i == idx else None for i in range(len(params))],\n vmap_over(d, **{param_names[idx]: 1}),\n )\n for idx in vmappable_param_idxs\n if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0\n # skip this distribution because _GeneralMixture.__init__ turns\n # 1d inputs into 0d attributes, thus breaks the expectations of\n # the vmapping test case where in_axes=1, only done for rank>=1 tensors.\n and jax_dist is not _GeneralMixture\n ),\n ]\n\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [\n jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)\n if isinstance(ax, int)\n else arg\n for arg, ax in zip(params, in_axes)\n ]\n # Recreate the jax_dist to avoid side effects coming from `d.sample`\n # triggering lazy_property computations, which, in a few cases, break\n # vmap_over's expectations regarding existing attributes to be vmapped.\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(\n *batched_params\n )\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d\n )\n assert eq == jnp.array([True])\n\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(\n key\n )\n\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-6)\n\n\ndef test_normal_log_cdf():\n # test if log_cdf method agrees with jax.scipy.stats.norm.logcdf\n # and if exp(log_cdf) agrees with cdf\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)\n\n\n@pytest.mark.parametrize(\n \"value\",\n [\n -15.0,\n jnp.array([[-15.0], [-10.0], [-5.0]]),\n jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),\n ],\n)\ndef test_truncated_normal_log_prob_in_tail(value):\n # define set of distributions truncated in tail of distribution\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = (-20, -1.0)\n a, b = (low - loc) / scale, (high - loc) / scale # rescale for jax input\n\n 
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(\n value\n )\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n # test, if samples from distributions truncated in\n # tail of distribution returns any inf's\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))\n assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-ids": [
97,
100,
123,
125,
137
]
}
|
[
97,
100,
123,
125,
137
] |
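A minimal sketch of the expand/sample-shape behavior the tests above assert, assuming numpyro and jax are installed:

import numpyro.distributions as dist
from jax import random

base = dist.Normal(0.0, 1.0)                        # batch_shape == (), event_shape == ()
expanded = base.expand((3, 2))                      # batch_shape == (3, 2)
samples = expanded.sample(random.PRNGKey(0), (5,))
assert samples.shape == (5, 3, 2)                   # sample_shape + batch_shape + event_shape
assert expanded.log_prob(samples).shape == (5, 3, 2)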
<|reserved_special_token_0|>
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
<|reserved_special_token_0|>
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
d.join(1)
print('d.is_alive()', d.is_alive())
n.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
d.join(1)
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == '__main__':
main3()
<|reserved_special_token_1|>
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name='daemon_process', target=daemon)
n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)
print('daemon_process default daemon value: %s' % d.daemon)
print('no_daemon_process default daemon value: %s' % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
d.join(1)
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == '__main__':
main3()
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding: utf-8
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name="daemon_process", target=daemon)
n = multiprocessing.Process(name="no_daemon_process", target=non_daemon)
print("daemon_process default daemon value: %s" % d.daemon)
print("no_daemon_process default daemon value: %s" % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name="daemon_process", target=daemon)
n = multiprocessing.Process(name="no_daemon_process", target=non_daemon)
print("daemon_process default daemon value: %s" % d.daemon)
print("no_daemon_process default daemon value: %s" % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
    # Block the parent process until the child processes have finished.
    # From experiments, the order in which the children finish and join() is called does not matter.
    # The only constraint is that the parent waits for all joined children to finish before continuing.
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
    # join() accepts a timeout argument: once the timeout elapses, join() returns regardless of whether the child has finished.
d.join(1)
    # Child process d is still running here, but the parent has already continued executing.
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == "__main__":
# main1()
# main2()
main3()
|
flexible
|
{
"blob_id": "9bb6fd6fbe212bdc29e2d1ec37fa6ec6ca9a9469",
"index": 1060,
"step-1": "<mask token>\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\n<mask token>\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\nif __name__ == '__main__':\n main3()\n",
"step-4": "import multiprocessing\nimport time\nimport sys\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\nif __name__ == '__main__':\n main3()\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport multiprocessing\nimport time\nimport sys\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name=\"daemon_process\", target=daemon)\n n = multiprocessing.Process(name=\"no_daemon_process\", target=non_daemon)\n print(\"daemon_process default daemon value: %s\" % d.daemon)\n print(\"no_daemon_process default daemon value: %s\" % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name=\"daemon_process\", target=daemon)\n n = multiprocessing.Process(name=\"no_daemon_process\", target=non_daemon)\n print(\"daemon_process default daemon value: %s\" % d.daemon)\n print(\"no_daemon_process default daemon value: %s\" % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n # 阻塞父进程,直到子进程结束为止。\n # 从实验来看,子进程结束和join的先后顺序无关。\n # 唯一的限制是父进程需要等所有join的子进程结束后,才会继续向下执行。\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n # join接受一个timeout的参数,意思就是如果超过了timeout的时间,不管子进程是否结束,join函数也会直接返回。\n d.join(1)\n # 可以看到子进程d仍然未结束,但是父进程已经继续执行了。\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\nif __name__ == \"__main__\":\n # main1()\n # main2()\n main3()\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
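A short sketch of the two behaviors the multiprocessing sample above demonstrates: a daemon child is terminated when the parent exits, and join(timeout) returns even while the child is still running:

import multiprocessing
import time


def worker():
    time.sleep(2)


if __name__ == '__main__':
    p = multiprocessing.Process(target=worker)
    p.daemon = True        # daemon children are killed when the parent process exits
    p.start()
    p.join(0.5)            # returns after 0.5 s even though worker() is still sleeping
    print(p.is_alive())    # True: the timeout elapsed before the child finished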
<|reserved_special_token_0|>
def initialize():
hands_file = open('euler54_poker.txt')
hands_string = hands_file.read()
tempList = []
newString = hands_string.replace('\n', ' ').replace(' ', '')
for i in range(0, len(newString), 2):
tempList.append(newString[i:i + 2])
hands_list = []
for i in range(0, len(tempList), 10):
new_hand = []
for j in range(2):
player_hand = []
for k in range(5):
player_hand.append(pokerAssignments[tempList[i + 5 * j + k]
[0]] + pokerAssignments[tempList[i + 5 * j + k][1]])
new_hand.append(player_hand)
hands_list.append(new_hand)
return hands_list
<|reserved_special_token_0|>
def check_flush(hand):
suit = hand[0] % 10
for i in range(1, 5):
if hand[i] % 10 != suit:
return False
return True
def check_straight(hand):
for i in range(1, 5):
if hand[i] // 10 != hand[i - 1] // 10 - 1:
return False
return True
def check_copies(hand):
config = []
hand.sort()
i = 0
while i < 5:
count = 1
j = 1
while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:
count += 1
j += 1
config.append([count, hand[i] // 10])
i += j
if config != []:
config.sort()
for i in range(len(config)):
for j in range(5):
if hand[j] // 10 == config[i][1]:
hand.insert(0, hand[j])
hand.pop(j + 1)
return hand, config[-2][0], config[-1][0]
def score_hand(hand):
hand.sort(reverse=True)
is_flush = check_flush(hand)
is_straight = check_straight(hand)
if is_flush and is_straight:
return hand, 8
elif is_flush:
return hand, 5
elif is_straight:
return hand, 4
else:
hand, config_one, config_two = check_copies(hand)
return hand, configScoring[config_one, config_two]
def compare(hand_one, hand_two):
for i in range(5):
if hand_one[i] // 10 > hand_two[i] // 10:
return 1
elif hand_two[i] // 10 > hand_one[i] // 10:
return 2
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def initialize():
hands_file = open('euler54_poker.txt')
hands_string = hands_file.read()
tempList = []
newString = hands_string.replace('\n', ' ').replace(' ', '')
for i in range(0, len(newString), 2):
tempList.append(newString[i:i + 2])
hands_list = []
for i in range(0, len(tempList), 10):
new_hand = []
for j in range(2):
player_hand = []
for k in range(5):
player_hand.append(pokerAssignments[tempList[i + 5 * j + k]
[0]] + pokerAssignments[tempList[i + 5 * j + k][1]])
new_hand.append(player_hand)
hands_list.append(new_hand)
return hands_list
<|reserved_special_token_0|>
def check_flush(hand):
suit = hand[0] % 10
for i in range(1, 5):
if hand[i] % 10 != suit:
return False
return True
def check_straight(hand):
for i in range(1, 5):
if hand[i] // 10 != hand[i - 1] // 10 - 1:
return False
return True
def check_copies(hand):
config = []
hand.sort()
i = 0
while i < 5:
count = 1
j = 1
while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:
count += 1
j += 1
config.append([count, hand[i] // 10])
i += j
if config != []:
config.sort()
for i in range(len(config)):
for j in range(5):
if hand[j] // 10 == config[i][1]:
hand.insert(0, hand[j])
hand.pop(j + 1)
return hand, config[-2][0], config[-1][0]
def score_hand(hand):
hand.sort(reverse=True)
is_flush = check_flush(hand)
is_straight = check_straight(hand)
if is_flush and is_straight:
return hand, 8
elif is_flush:
return hand, 5
elif is_straight:
return hand, 4
else:
hand, config_one, config_two = check_copies(hand)
return hand, configScoring[config_one, config_two]
def compare(hand_one, hand_two):
for i in range(5):
if hand_one[i] // 10 > hand_two[i] // 10:
return 1
elif hand_two[i] // 10 > hand_one[i] // 10:
return 2
return None
def main(hands):
p_one_wins = 0
for i in range(len(hands)):
p_one_hand, p_one_score = score_hand(hands[i][0])
p_two_hand, p_two_score = score_hand(hands[i][1])
if p_one_score > p_two_score:
p_one_wins += 1
elif p_one_score == p_two_score:
if compare(p_one_hand, p_two_hand) == 1:
p_one_wins += 1
return p_one_wins
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def initialize():
hands_file = open('euler54_poker.txt')
hands_string = hands_file.read()
tempList = []
newString = hands_string.replace('\n', ' ').replace(' ', '')
for i in range(0, len(newString), 2):
tempList.append(newString[i:i + 2])
hands_list = []
for i in range(0, len(tempList), 10):
new_hand = []
for j in range(2):
player_hand = []
for k in range(5):
player_hand.append(pokerAssignments[tempList[i + 5 * j + k]
[0]] + pokerAssignments[tempList[i + 5 * j + k][1]])
new_hand.append(player_hand)
hands_list.append(new_hand)
return hands_list
<|reserved_special_token_0|>
def check_flush(hand):
suit = hand[0] % 10
for i in range(1, 5):
if hand[i] % 10 != suit:
return False
return True
def check_straight(hand):
for i in range(1, 5):
if hand[i] // 10 != hand[i - 1] // 10 - 1:
return False
return True
def check_copies(hand):
config = []
hand.sort()
i = 0
while i < 5:
count = 1
j = 1
while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:
count += 1
j += 1
config.append([count, hand[i] // 10])
i += j
if config != []:
config.sort()
for i in range(len(config)):
for j in range(5):
if hand[j] // 10 == config[i][1]:
hand.insert(0, hand[j])
hand.pop(j + 1)
return hand, config[-2][0], config[-1][0]
def score_hand(hand):
hand.sort(reverse=True)
is_flush = check_flush(hand)
is_straight = check_straight(hand)
if is_flush and is_straight:
return hand, 8
elif is_flush:
return hand, 5
elif is_straight:
return hand, 4
else:
hand, config_one, config_two = check_copies(hand)
return hand, configScoring[config_one, config_two]
def compare(hand_one, hand_two):
for i in range(5):
if hand_one[i] // 10 > hand_two[i] // 10:
return 1
elif hand_two[i] // 10 > hand_one[i] // 10:
return 2
return None
def main(hands):
p_one_wins = 0
for i in range(len(hands)):
p_one_hand, p_one_score = score_hand(hands[i][0])
p_two_hand, p_two_score = score_hand(hands[i][1])
if p_one_score > p_two_score:
p_one_wins += 1
elif p_one_score == p_two_score:
if compare(p_one_hand, p_two_hand) == 1:
p_one_wins += 1
return p_one_wins
print(main(hands_list))
<|reserved_special_token_1|>
pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70,
'8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C':
0, 'S': 1, 'H': 2, 'D': 3}
configScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1,
4): 7}
scoreValues = {(0): 'High Card', (1): 'Pair', (2): '2 Pair', (3):
'3 of a Kind', (4): 'Straight', (5): 'Flush', (6): 'Full House', (7):
'4 of a Kind', (8): 'Straight Flush'}
def initialize():
hands_file = open('euler54_poker.txt')
hands_string = hands_file.read()
tempList = []
newString = hands_string.replace('\n', ' ').replace(' ', '')
for i in range(0, len(newString), 2):
tempList.append(newString[i:i + 2])
hands_list = []
for i in range(0, len(tempList), 10):
new_hand = []
for j in range(2):
player_hand = []
for k in range(5):
player_hand.append(pokerAssignments[tempList[i + 5 * j + k]
[0]] + pokerAssignments[tempList[i + 5 * j + k][1]])
new_hand.append(player_hand)
hands_list.append(new_hand)
return hands_list
hands_list = initialize()
def check_flush(hand):
suit = hand[0] % 10
for i in range(1, 5):
if hand[i] % 10 != suit:
return False
return True
def check_straight(hand):
for i in range(1, 5):
if hand[i] // 10 != hand[i - 1] // 10 - 1:
return False
return True
def check_copies(hand):
config = []
hand.sort()
i = 0
while i < 5:
count = 1
j = 1
while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:
count += 1
j += 1
config.append([count, hand[i] // 10])
i += j
if config != []:
config.sort()
for i in range(len(config)):
for j in range(5):
if hand[j] // 10 == config[i][1]:
hand.insert(0, hand[j])
hand.pop(j + 1)
return hand, config[-2][0], config[-1][0]
def score_hand(hand):
hand.sort(reverse=True)
is_flush = check_flush(hand)
is_straight = check_straight(hand)
if is_flush and is_straight:
return hand, 8
elif is_flush:
return hand, 5
elif is_straight:
return hand, 4
else:
hand, config_one, config_two = check_copies(hand)
return hand, configScoring[config_one, config_two]
def compare(hand_one, hand_two):
for i in range(5):
if hand_one[i] // 10 > hand_two[i] // 10:
return 1
elif hand_two[i] // 10 > hand_one[i] // 10:
return 2
return None
def main(hands):
p_one_wins = 0
for i in range(len(hands)):
p_one_hand, p_one_score = score_hand(hands[i][0])
p_two_hand, p_two_score = score_hand(hands[i][1])
if p_one_score > p_two_score:
p_one_wins += 1
elif p_one_score == p_two_score:
if compare(p_one_hand, p_two_hand) == 1:
p_one_wins += 1
return p_one_wins
print(main(hands_list))
<|reserved_special_token_1|>
pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70, '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C': 0, 'S': 1, 'H': 2, 'D': 3} #Used to assign each card to a unique three-digit integer
configScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1, 4): 7} #Tracks hand scores for (respectively) high card, pair, two pair, three-of-a-kind, full house, and four-of-a-kind
scoreValues = {0: 'High Card', 1: 'Pair', 2: '2 Pair', 3: '3 of a Kind', 4: 'Straight', 5: 'Flush', 6: 'Full House', 7: '4 of a Kind', 8: 'Straight Flush'} #This data object is purely to enhance readability by demonstrating what type of hand each hand score corresponds to
def initialize(): #initalizes hands_list, assigns each card in a hand to a unique three-digit integer
hands_file = open("euler54_poker.txt")
hands_string = hands_file.read()
tempList = []
newString = (hands_string.replace('\n', ' ')).replace(' ', '')
for i in range(0, len(newString), 2):
tempList.append(newString[i: i + 2])
hands_list = []
for i in range(0, len(tempList), 10): #generates list item for each hand of 10 cards
new_hand = []
for j in range(2): #generates list item for each player's cards
player_hand = []
for k in range(5):
player_hand.append(pokerAssignments[tempList[i + 5*j + k][0]] + pokerAssignments[tempList[i + 5*j + k][1]])
new_hand.append(player_hand)
hands_list.append(new_hand)
return hands_list
hands_list = initialize()
def check_flush(hand): # checks if a reverse sorted hand is a flush
suit = hand[0] % 10
for i in range(1, 5):
if hand[i] % 10 != suit:
return False
return True
def check_straight(hand): #checks if a reverse sorted hand is a straight
for i in range(1, 5):
if hand[i] // 10 != (hand[i - 1] // 10) - 1:
return False
return True
def check_copies(hand): #checks if a hand has any pairs, three of a kind, two pair, etc. and sorts it accordingly
config = []
hand.sort()
i = 0
while i < 5:
count = 1
j = 1
while i + j < 5 and (hand[i + j] // 10) == (hand[i] // 10):
count += 1
j += 1
config.append([count, hand[i] // 10])
i += j
if config != []: #sorts for comparison
config.sort()
for i in range(len(config)):
for j in range(5):
if (hand[j] // 10) == config[i][1]:
hand.insert(0, hand[j])
hand.pop(j + 1)
return hand, config[-2][0], config[-1][0]
def score_hand(hand): #returns a number 0-8 for the hand the player has and the hand properly sorted
hand.sort(reverse = True)
is_flush = check_flush(hand)
is_straight = check_straight(hand)
if is_flush and is_straight:
return hand, 8
elif is_flush:
return hand, 5
elif is_straight:
return hand, 4
else:
hand, config_one, config_two = check_copies(hand)
return hand, configScoring[config_one, config_two]
def compare(hand_one, hand_two): #returns the number of the winning player if players have same hand score (who has higher card in tiebreak?)
for i in range(5):
if hand_one[i] // 10 > hand_two[i] // 10:
return 1
elif hand_two[i] // 10 > hand_one[i] // 10:
return 2
return None
def main(hands):
p_one_wins = 0
for i in range(len(hands)):
p_one_hand, p_one_score = score_hand(hands[i][0])
p_two_hand, p_two_score = score_hand(hands[i][1])
if p_one_score > p_two_score:
p_one_wins += 1
elif p_one_score == p_two_score:
if compare(p_one_hand, p_two_hand) == 1:
p_one_wins += 1
return p_one_wins
print(main(hands_list))
|
flexible
|
{
"blob_id": "a2a3e8d52fd467178460b178c5dbf9ccd72706e7",
"index": 8251,
"step-1": "<mask token>\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\n<mask token>\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\n<mask token>\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\ndef main(hands):\n p_one_wins = 0\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n if p_one_score > p_two_score:\n p_one_wins += 1\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n return p_one_wins\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\n<mask token>\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\ndef main(hands):\n p_one_wins = 0\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n if p_one_score > p_two_score:\n p_one_wins += 1\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n return p_one_wins\n\n\nprint(main(hands_list))\n",
"step-4": "pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70,\n '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C':\n 0, 'S': 1, 'H': 2, 'D': 3}\nconfigScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1,\n 4): 7}\nscoreValues = {(0): 'High Card', (1): 'Pair', (2): '2 Pair', (3):\n '3 of a Kind', (4): 'Straight', (5): 'Flush', (6): 'Full House', (7):\n '4 of a Kind', (8): 'Straight Flush'}\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\nhands_list = initialize()\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\ndef main(hands):\n p_one_wins = 0\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n if p_one_score > p_two_score:\n p_one_wins += 1\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n return p_one_wins\n\n\nprint(main(hands_list))\n",
"step-5": "pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70, '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C': 0, 'S': 1, 'H': 2, 'D': 3} #Used to assign each card to a unique three-digit integer\n\nconfigScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1, 4): 7} #Tracks hand scores for (respectively) high card, pair, two pair, three-of-a-kind, full house, and four-of-a-kind\n\nscoreValues = {0: 'High Card', 1: 'Pair', 2: '2 Pair', 3: '3 of a Kind', 4: 'Straight', 5: 'Flush', 6: 'Full House', 7: '4 of a Kind', 8: 'Straight Flush'} #This data object is purely to enhance readability by demonstrating what type of hand each hand score corresponds to\n\ndef initialize(): #initalizes hands_list, assigns each card in a hand to a unique three-digit integer\n hands_file = open(\"euler54_poker.txt\")\n hands_string = hands_file.read()\n tempList = []\n newString = (hands_string.replace('\\n', ' ')).replace(' ', '')\n\n for i in range(0, len(newString), 2):\n tempList.append(newString[i: i + 2])\n\n hands_list = []\n\n for i in range(0, len(tempList), 10): #generates list item for each hand of 10 cards\n new_hand = []\n\n for j in range(2): #generates list item for each player's cards\n player_hand = []\n\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5*j + k][0]] + pokerAssignments[tempList[i + 5*j + k][1]])\n\n new_hand.append(player_hand)\n\n hands_list.append(new_hand)\n\n return hands_list\n\nhands_list = initialize()\n\ndef check_flush(hand): # checks if a reverse sorted hand is a flush\n suit = hand[0] % 10\n\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n\n return True\n\ndef check_straight(hand): #checks if a reverse sorted hand is a straight\n\n for i in range(1, 5):\n\n if hand[i] // 10 != (hand[i - 1] // 10) - 1:\n return False\n\n return True\n\ndef check_copies(hand): #checks if a hand has any pairs, three of a kind, two pair, etc. 
and sorts it accordingly\n config = []\n hand.sort()\n\n i = 0\n while i < 5:\n count = 1\n j = 1\n\n while i + j < 5 and (hand[i + j] // 10) == (hand[i] // 10):\n count += 1\n j += 1\n\n config.append([count, hand[i] // 10])\n i += j\n\n if config != []: #sorts for comparison\n config.sort()\n\n for i in range(len(config)):\n\n for j in range(5):\n\n if (hand[j] // 10) == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n\n return hand, config[-2][0], config[-1][0]\n\ndef score_hand(hand): #returns a number 0-8 for the hand the player has and the hand properly sorted\n hand.sort(reverse = True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n\n if is_flush and is_straight:\n return hand, 8\n\n elif is_flush:\n return hand, 5\n\n elif is_straight:\n return hand, 4\n\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\ndef compare(hand_one, hand_two): #returns the number of the winning player if players have same hand score (who has higher card in tiebreak?)\n\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n\n return None\n\ndef main(hands):\n p_one_wins = 0\n\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n\n if p_one_score > p_two_score:\n p_one_wins += 1\n\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n\n return p_one_wins\n\nprint(main(hands_list))\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
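A quick worked example of the three-digit card encoding used in the poker sample above, showing how rank and suit are recovered with // 10 and % 10:

# 'KH' -> pokerAssignments['K'] + pokerAssignments['H'] = 130 + 2 = 132
card = 132
rank = card // 10   # 13 (king; an ace encodes to 14x, so rank 14)
suit = card % 10    # 2 (hearts)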
<|reserved_special_token_0|>
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains('^PermitRootLogin no$')
assert sshd.contains('^X11Forwarding no$')
assert sshd.contains('^UsePAM yes$')
assert sshd.contains('\\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains('^User test$')
assert ssh.contains('^Host \\*$')
assert ssh.contains('\\sPort 23$')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains('^PermitRootLogin no$')
assert sshd.contains('^X11Forwarding no$')
assert sshd.contains('^UsePAM yes$')
assert sshd.contains('\\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains('^User test$')
assert ssh.contains('^Host \\*$')
assert ssh.contains('\\sPort 23$')
def test_service(host):
ssh = host.service('ssh')
assert ssh.is_running
assert ssh.is_enabled
assert host.socket('tcp://0.0.0.0:22').is_listening
<|reserved_special_token_1|>
<|reserved_special_token_0|>
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[
'MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains('^PermitRootLogin no$')
assert sshd.contains('^X11Forwarding no$')
assert sshd.contains('^UsePAM yes$')
assert sshd.contains('\\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains('^User test$')
assert ssh.contains('^Host \\*$')
assert ssh.contains('\\sPort 23$')
def test_service(host):
ssh = host.service('ssh')
assert ssh.is_running
assert ssh.is_enabled
assert host.socket('tcp://0.0.0.0:22').is_listening
<|reserved_special_token_1|>
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[
'MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains('^PermitRootLogin no$')
assert sshd.contains('^X11Forwarding no$')
assert sshd.contains('^UsePAM yes$')
assert sshd.contains('\\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains('^User test$')
assert ssh.contains('^Host \\*$')
assert ssh.contains('\\sPort 23$')
def test_service(host):
ssh = host.service('ssh')
assert ssh.is_running
assert ssh.is_enabled
assert host.socket('tcp://0.0.0.0:22').is_listening
<|reserved_special_token_1|>
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains(r'^PermitRootLogin no$')
assert sshd.contains(r'^X11Forwarding no$')
assert sshd.contains(r'^UsePAM yes$')
assert sshd.contains(r'\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains(r'^User test$')
assert ssh.contains(r'^Host \*$')
assert ssh.contains(r'\sPort 23$')
def test_service(host):
ssh = host.service('ssh')
assert ssh.is_running
assert ssh.is_enabled
assert host.socket('tcp://0.0.0.0:22').is_listening
|
flexible
|
{
"blob_id": "2345d1f72fb695ccec5af0ed157c0606f197009c",
"index": 3398,
"step-1": "<mask token>\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-3": "<mask token>\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[\n 'MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-4": "import os\nimport testinfra.utils.ansible_runner\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[\n 'MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-5": "import os\n\nimport testinfra.utils.ansible_runner\n\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains(r'^PermitRootLogin no$')\n assert sshd.contains(r'^X11Forwarding no$')\n assert sshd.contains(r'^UsePAM yes$')\n assert sshd.contains(r'\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains(r'^User test$')\n assert ssh.contains(r'^Host \\*$')\n assert ssh.contains(r'\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pyttsx3
from pydub import AudioSegment
engine = pyttsx3.init() # object creation
""" RATE"""
#printing current voice rate
engine.setProperty('rate', 150) # setting up new voice rate
rate = engine.getProperty('rate') # getting details of current speaking rate
print (rate)
"""VOLUME"""
# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)
# print (volume) #printing current volume level
# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1
# """VOICE"""
# voices = engine.getProperty('voices') #getting details of current voice
# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
# engine.say("Hello World!")
# engine.say('My current speaking rate is ' + str(rate))
# engine.runAndWait()
# engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
a=open('TrumpNewFF.srt').readlines()
i=2
l = len(a)
while i<l:
engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
engine.runAndWait()
if i+3<l:
time_1 = a[i-1].split(' --> ')[1].split(':')
time_1_mil = time_1[-1].split(',')
time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000
time_1_hour = float(time_1[-2])*60000
time_2 = a[i+3].split(' --> ')[0].split(':')
time_2_hour = float(time_2[-2])*60000
time_2_mil = time_2[-1].split(',')
time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000
duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil)
# create 1 sec of silence audio segment
one_sec_segment = AudioSegment.silent(duration=int(duration)) #duration in milliseconds
print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)
#Either save modified audio
one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format="wav")
i+=4
engine.stop()
|
normal
|
{
"blob_id": "32f4f7ad61b99848c907e092c5ed7a839f0b352b",
"index": 6399,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine.setProperty('rate', 150)\n<mask token>\nprint(rate)\n<mask token>\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-3": "<mask token>\nengine = pyttsx3.init()\n<mask token>\nengine.setProperty('rate', 150)\nrate = engine.getProperty('rate')\nprint(rate)\n<mask token>\na = open('TrumpNewFF.srt').readlines()\ni = 2\nl = len(a)\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-4": "import pyttsx3\nfrom pydub import AudioSegment\nengine = pyttsx3.init()\n<mask token>\nengine.setProperty('rate', 150)\nrate = engine.getProperty('rate')\nprint(rate)\n<mask token>\na = open('TrumpNewFF.srt').readlines()\ni = 2\nl = len(a)\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-5": "import pyttsx3\r\nfrom pydub import AudioSegment\r\n\r\nengine = pyttsx3.init() # object creation\r\n\r\n\"\"\" RATE\"\"\"\r\n #printing current voice rate\r\nengine.setProperty('rate', 150) # setting up new voice rate\r\nrate = engine.getProperty('rate') # getting details of current speaking rate\r\nprint (rate) \r\n\r\n\"\"\"VOLUME\"\"\"\r\n# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)\r\n# print (volume) #printing current volume level\r\n# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1\r\n\r\n# \"\"\"VOICE\"\"\"\r\n# voices = engine.getProperty('voices') #getting details of current voice\r\n# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male\r\n# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female\r\n\r\n# engine.say(\"Hello World!\")\r\n# engine.say('My current speaking rate is ' + str(rate))\r\n# engine.runAndWait()\r\n# engine.stop()\r\n\r\n\"\"\"Saving Voice to a file\"\"\"\r\n# On linux make sure that 'espeak' and 'ffmpeg' are installed\r\na=open('TrumpNewFF.srt').readlines()\r\ni=2\r\nl = len(a)\r\nwhile i<l:\r\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\r\n engine.runAndWait()\r\n if i+3<l:\r\n time_1 = a[i-1].split(' --> ')[1].split(':')\r\n time_1_mil = time_1[-1].split(',')\r\n time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000\r\n time_1_hour = float(time_1[-2])*60000\r\n \r\n time_2 = a[i+3].split(' --> ')[0].split(':')\r\n time_2_hour = float(time_2[-2])*60000\r\n time_2_mil = time_2[-1].split(',')\r\n time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000\r\n \r\n duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil) \r\n # create 1 sec of silence audio segment\r\n one_sec_segment = AudioSegment.silent(duration=int(duration)) #duration in milliseconds\r\n \r\n \r\n print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)\r\n #Either save modified audio\r\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format=\"wav\")\r\n i+=4\r\nengine.stop()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('\n\n\n\n')
<|reserved_special_token_0|>
admin1.describe_user()
print('\n')
admin1.set_user_name('Reven10')
print('\n')
admin1.describe_user()
admin1.privileges.show_privileges()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('\n\n\n\n')
admin1 = userObj.Admin('john', 'deer', 30)
admin1.describe_user()
print('\n')
admin1.set_user_name('Reven10')
print('\n')
admin1.describe_user()
admin1.privileges.show_privileges()
<|reserved_special_token_1|>
import TryItYourSelf_9_8 as userObj
print('\n\n\n\n')
admin1 = userObj.Admin('john', 'deer', 30)
admin1.describe_user()
print('\n')
admin1.set_user_name('Reven10')
print('\n')
admin1.describe_user()
admin1.privileges.show_privileges()
|
flexible
|
{
"blob_id": "169ad888e7629faff9509399ac7ead7a149a9602",
"index": 543,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('\\n\\n\\n\\n')\n<mask token>\nadmin1.describe_user()\nprint('\\n')\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"step-3": "<mask token>\nprint('\\n\\n\\n\\n')\nadmin1 = userObj.Admin('john', 'deer', 30)\nadmin1.describe_user()\nprint('\\n')\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"step-4": "import TryItYourSelf_9_8 as userObj\nprint('\\n\\n\\n\\n')\nadmin1 = userObj.Admin('john', 'deer', 30)\nadmin1.describe_user()\nprint('\\n')\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('-' * 40)
print('LOJA SUPER BARATÃO')
print('-' * 40)
while True:
produto = str(input('Nome do Produto: '))
preco = float(input('Preço: '))
cont += 1
total += preco
if preco > 1000:
totmil += 1
if cont == 1 or preco < menor:
barato = produto
menor = preco
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print('O total da compra foi R${:.2f}'.format(total))
print('Temos {} produtos custando mais de R$1000,00'.format(totmil))
print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
<|reserved_special_token_1|>
total = totmil = cont = menor = 0
barato = ' '
print('-' * 40)
print('LOJA SUPER BARATÃO')
print('-' * 40)
while True:
produto = str(input('Nome do Produto: '))
preco = float(input('Preço: '))
cont += 1
total += preco
if preco > 1000:
totmil += 1
if cont == 1 or preco < menor:
barato = produto
menor = preco
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print('O total da compra foi R${:.2f}'.format(total))
print('Temos {} produtos custando mais de R$1000,00'.format(totmil))
print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
|
flexible
|
{
"blob_id": "35b24ffa14f8b3c2040d5becc8a35721e86d8b3d",
"index": 345,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n",
"step-3": "total = totmil = cont = menor = 0\nbarato = ' '\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting, data_model):
print('/n', 'body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall('"([^"]*)"', inputString)
for i in data_model['results']:
num = num + 1
if f2[0] in i[2]:
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting, data_model):
print('/n', 'body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall('"([^"]*)"', inputString)
for i in data_model['results']:
num = num + 1
if f2[0] in i[2]:
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
if __name__ == '__main__':
print(
"""
fofa语法
host=".gov.cn"
port="6379"
ip="1.1.1.1"
ip="220.181.111.1/24"
该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng
"""
)
a = input('请输入需要查询的fofa语法:')
main(a)
<|reserved_special_token_1|>
import requests
import json
import base64
import re
def main(targetsrting):
email = ''
key = ''
target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')
url = (
'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'
.format(email, key, target))
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text)
num = 0
for i in data_model['results']:
num = num + 1
if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in
i[2]) & ('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if a == '1':
body(targetsrting, data_model)
print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting, data_model):
print('/n', 'body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall('"([^"]*)"', inputString)
for i in data_model['results']:
num = num + 1
if f2[0] in i[2]:
print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))
if __name__ == '__main__':
print(
"""
fofa语法
host=".gov.cn"
port="6379"
ip="1.1.1.1"
ip="220.181.111.1/24"
该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng
"""
)
a = input('请输入需要查询的fofa语法:')
main(a)
<|reserved_special_token_1|>
import requests
import json
import base64
import re
def main(targetsrting):
email="" #email
key="" #key
#targetsrting='ip="202.107.117.5/24"' #搜索关键字
target=base64.b64encode(targetsrting.encode('utf-8')).decode("utf-8")
url="https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000".format(email,key,target)
resp = requests.get(url)
try:
resp = requests.get(url)
data_model = json.loads(resp.text) #字符串转换为字典
#print(data_model)
num = 0
for i in data_model["results"]:
num = num +1
if (len(i[2]) > 0) and ('Not Found' not in i[2])&('ERROR' not in i[2])&('Unavailable' not in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0],i[1],i[2]))
a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')
if(a == '1'):
body(targetsrting,data_model)
print("fofa查询总共",num,"条数据,以上数据均通过title筛查不输出空值。")
except:
print("'\n',出现问题了,账号密码、网络、其他原因,无法fofa查询")
def body(targetsrting,data_model):
print('/n','body筛查的结果')
num = 0
inputString = '{}'.format(targetsrting)
f2 = re.findall(r'"([^"]*)"', inputString)
for i in data_model["results"]:
num = num +1
if (f2[0] in i[2]):
print('{:<30}{:<30}{:<20}'.format(i[0],i[1],i[2]))
if __name__ == '__main__':
print('''
fofa语法
host=".gov.cn"
port="6379"
ip="1.1.1.1"
ip="220.181.111.1/24"
该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng
''')
a = input("请输入需要查询的fofa语法:")
main(a)
|
flexible
|
{
"blob_id": "5f13866bd5c6d20e8ddc112fb1d1335e3fd46c3e",
"index": 1817,
"step-1": "<mask token>\n\n\ndef main(targetsrting):\n email = ''\n key = ''\n target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')\n url = (\n 'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'\n .format(email, key, target))\n resp = requests.get(url)\n try:\n resp = requests.get(url)\n data_model = json.loads(resp.text)\n num = 0\n for i in data_model['results']:\n num = num + 1\n if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in\n i[2]) & ('Unavailable' not in i[2]):\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')\n if a == '1':\n body(targetsrting, data_model)\n print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')\n except:\n print(\"'\\n',出现问题了,账号密码、网络、其他原因,无法fofa查询\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(targetsrting):\n email = ''\n key = ''\n target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')\n url = (\n 'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'\n .format(email, key, target))\n resp = requests.get(url)\n try:\n resp = requests.get(url)\n data_model = json.loads(resp.text)\n num = 0\n for i in data_model['results']:\n num = num + 1\n if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in\n i[2]) & ('Unavailable' not in i[2]):\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')\n if a == '1':\n body(targetsrting, data_model)\n print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')\n except:\n print(\"'\\n',出现问题了,账号密码、网络、其他原因,无法fofa查询\")\n\n\ndef body(targetsrting, data_model):\n print('/n', 'body筛查的结果')\n num = 0\n inputString = '{}'.format(targetsrting)\n f2 = re.findall('\"([^\"]*)\"', inputString)\n for i in data_model['results']:\n num = num + 1\n if f2[0] in i[2]:\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(targetsrting):\n email = ''\n key = ''\n target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')\n url = (\n 'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'\n .format(email, key, target))\n resp = requests.get(url)\n try:\n resp = requests.get(url)\n data_model = json.loads(resp.text)\n num = 0\n for i in data_model['results']:\n num = num + 1\n if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in\n i[2]) & ('Unavailable' not in i[2]):\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')\n if a == '1':\n body(targetsrting, data_model)\n print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')\n except:\n print(\"'\\n',出现问题了,账号密码、网络、其他原因,无法fofa查询\")\n\n\ndef body(targetsrting, data_model):\n print('/n', 'body筛查的结果')\n num = 0\n inputString = '{}'.format(targetsrting)\n f2 = re.findall('\"([^\"]*)\"', inputString)\n for i in data_model['results']:\n num = num + 1\n if f2[0] in i[2]:\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n\n\nif __name__ == '__main__':\n print(\n \"\"\"\n fofa语法\n host=\".gov.cn\"\n port=\"6379\"\n ip=\"1.1.1.1\"\n ip=\"220.181.111.1/24\"\n \n 该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng\n \"\"\"\n )\n a = input('请输入需要查询的fofa语法:')\n main(a)\n",
"step-4": "import requests\nimport json\nimport base64\nimport re\n\n\ndef main(targetsrting):\n email = ''\n key = ''\n target = base64.b64encode(targetsrting.encode('utf-8')).decode('utf-8')\n url = (\n 'https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000'\n .format(email, key, target))\n resp = requests.get(url)\n try:\n resp = requests.get(url)\n data_model = json.loads(resp.text)\n num = 0\n for i in data_model['results']:\n num = num + 1\n if len(i[2]) > 0 and ('Not Found' not in i[2]) & ('ERROR' not in\n i[2]) & ('Unavailable' not in i[2]):\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')\n if a == '1':\n body(targetsrting, data_model)\n print('fofa查询总共', num, '条数据,以上数据均通过title筛查不输出空值。')\n except:\n print(\"'\\n',出现问题了,账号密码、网络、其他原因,无法fofa查询\")\n\n\ndef body(targetsrting, data_model):\n print('/n', 'body筛查的结果')\n num = 0\n inputString = '{}'.format(targetsrting)\n f2 = re.findall('\"([^\"]*)\"', inputString)\n for i in data_model['results']:\n num = num + 1\n if f2[0] in i[2]:\n print('{:<30}{:<30}{:<20}'.format(i[0], i[1], i[2]))\n\n\nif __name__ == '__main__':\n print(\n \"\"\"\n fofa语法\n host=\".gov.cn\"\n port=\"6379\"\n ip=\"1.1.1.1\"\n ip=\"220.181.111.1/24\"\n \n 该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng\n \"\"\"\n )\n a = input('请输入需要查询的fofa语法:')\n main(a)\n",
"step-5": "import requests\nimport json\nimport base64\nimport re\n\ndef main(targetsrting):\n email=\"\" #email\n key=\"\" #key\n #targetsrting='ip=\"202.107.117.5/24\"' #搜索关键字\n target=base64.b64encode(targetsrting.encode('utf-8')).decode(\"utf-8\")\n url=\"https://fofa.so/api/v1/search/all?email={}&key={}&qbase64={}&fields=host,server,title&size=1000\".format(email,key,target)\n resp = requests.get(url)\n try:\n resp = requests.get(url)\n data_model = json.loads(resp.text) #字符串转换为字典\n #print(data_model)\n num = 0\n for i in data_model[\"results\"]:\n num = num +1\n if (len(i[2]) > 0) and ('Not Found' not in i[2])&('ERROR' not in i[2])&('Unavailable' not in i[2]):\n print('{:<30}{:<30}{:<20}'.format(i[0],i[1],i[2]))\n a = input('是否要进行边缘资产的title筛查(建议用body搜索 --确定的话请摁1):')\n if(a == '1'):\n body(targetsrting,data_model)\n print(\"fofa查询总共\",num,\"条数据,以上数据均通过title筛查不输出空值。\")\n except:\n print(\"'\\n',出现问题了,账号密码、网络、其他原因,无法fofa查询\")\n\ndef body(targetsrting,data_model):\n print('/n','body筛查的结果')\n num = 0\n inputString = '{}'.format(targetsrting)\n f2 = re.findall(r'\"([^\"]*)\"', inputString)\n for i in data_model[\"results\"]:\n num = num +1\n if (f2[0] in i[2]):\n print('{:<30}{:<30}{:<20}'.format(i[0],i[1],i[2]))\n\nif __name__ == '__main__':\n print('''\n fofa语法\n host=\".gov.cn\"\n port=\"6379\"\n ip=\"1.1.1.1\"\n ip=\"220.181.111.1/24\"\n \n 该脚本主要用于快速C段寻找目标边缘资产。 --by aufeng\n ''')\n a = input(\"请输入需要查询的fofa语法:\")\n main(a)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
import numpy as np
#import data
df = pd.read_csv('../.gitignore/PPP_data_to_150k.csv')
counties = pd.read_csv('../data/zip_code_database.csv')
demographics = pd.read_csv('../data/counties.csv')
#filter out all unanswered ethnicities
df2 = df[~df.RaceEthnicity.str.contains("Unanswered")]
#drop nonprofit column
df2.drop('NonProfit', axis=1,inplace=True)
#drop row with Nebraska Zip code
df2.drop([71479],axis=0, inplace=True)
#filter zip code database for Colorado, drop unnecessary columns
co_counties = counties[counties['state']=='CO']
co_counties_1 = co_counties.drop(['decommissioned', 'acceptable_cities', 'unacceptable_cities','timezone','area_codes','world_region','country','irs_estimated_population_2015','primary_city','state'],axis=1)
#merge counties onto dataframe
df_with_counties = pd.merge(df2,co_counties_1, left_on='Zip', right_on='zip')
#only include 2018 demographic data
demographics_18 = demographics[demographics['YEAR']==2018]
demographics_18 = demographics_18.iloc[:,:11]
#drop NAN Jobs Retained values for scatter comparison of Jobs Retained to Loan Amount by ethnicity
ethnicity_dfs_job_comparison = [x.dropna(subset=['JobsRetained']) for x in ethnicity_dfs]
if __name__ == '__main__':
|
normal
|
{
"blob_id": "732478fd826e09cf304760dfcc30cd077f74d83e",
"index": 2250,
"step-1": "import pandas as pd\nimport numpy as np\n\n#import data\ndf = pd.read_csv('../.gitignore/PPP_data_to_150k.csv')\ncounties = pd.read_csv('../data/zip_code_database.csv')\ndemographics = pd.read_csv('../data/counties.csv')\n\n#filter out all unanswered ethnicities\ndf2 = df[~df.RaceEthnicity.str.contains(\"Unanswered\")]\n\n#drop nonprofit column\ndf2.drop('NonProfit', axis=1,inplace=True)\n\n#drop row with Nebraska Zip code\ndf2.drop([71479],axis=0, inplace=True)\n\n#filter zip code database for Colorado, drop unnecessary columns\nco_counties = counties[counties['state']=='CO']\nco_counties_1 = co_counties.drop(['decommissioned', 'acceptable_cities', 'unacceptable_cities','timezone','area_codes','world_region','country','irs_estimated_population_2015','primary_city','state'],axis=1)\n\n#merge counties onto dataframe \ndf_with_counties = pd.merge(df2,co_counties_1, left_on='Zip', right_on='zip')\n\n#only include 2018 demographic data\ndemographics_18 = demographics[demographics['YEAR']==2018]\ndemographics_18 = demographics_18.iloc[:,:11]\n\n#drop NAN Jobs Retained values for scatter comparison of Jobs Retained to Loan Amount by ethnicity\nethnicity_dfs_job_comparison = [x.dropna(subset=['JobsRetained']) for x in ethnicity_dfs]\n\nif __name__ == '__main__':\n\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python 3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 PanXu, Inc. All Rights Reserved
#
"""
测试 label index decoder
Authors: PanXu
Date: 2020/07/05 15:10:00
"""
import pytest
import torch
from easytext.tests import ASSERT
from easytext.data import LabelVocabulary
from easytext.modules import ConditionalRandomField
from easytext.label_decoder import CRFLabelIndexDecoder
class CRFData:
"""
测试用的 crf 数据
"""
def __init__(self):
bio_labels = [["O", "I-X", "B-X", "I-Y", "B-Y"]]
self.label_vocabulary = LabelVocabulary(labels=bio_labels,
padding=LabelVocabulary.PADDING)
self.logits = torch.tensor([
[[0, 0, .5, .5, .2], [0, 0, .3, .3, .1], [0, 0, .9, 10, 1]],
[[0, 0, .2, .5, .2], [0, 0, 3, .3, .1], [0, 0, .9, 1, 1]],
], dtype=torch.float)
self.tags = torch.tensor([
[2, 3, 4],
[3, 2, 2]
], dtype=torch.long)
self.transitions = torch.tensor([
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.8, 0.3, 0.1, 0.7, 0.9],
[-0.3, 2.1, -5.6, 3.4, 4.0],
[0.2, 0.4, 0.6, -0.3, -0.4],
[1.0, 1.0, 1.0, 1.0, 1.0]
], dtype=torch.float)
self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6], dtype=torch.float)
self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4], dtype=torch.float)
# Use the CRF Module with fixed transitions to compute the log_likelihood
self.crf = ConditionalRandomField(5)
self.crf.transitions = torch.nn.Parameter(self.transitions)
self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
# constraint crf
constraints = {(0, 0), (0, 1),
(1, 1), (1, 2),
(2, 2), (2, 3),
(3, 3), (3, 4),
(4, 4), (4, 0)}
# Add the transitions to the end tag
# and from the start tag.
for i in range(5):
constraints.add((5, i))
constraints.add((i, 6))
constraint_crf = ConditionalRandomField(num_tags=5, constraints=constraints)
constraint_crf.transitions = torch.nn.Parameter(self.transitions)
constraint_crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
constraint_crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
self.constraint_crf = constraint_crf
@pytest.fixture(scope="class")
def crf_data():
"""
产生测试用的 crf data
:return:
"""
return CRFData()
def test_crf_label_index_decoder(crf_data):
"""
测试 crf label index decoder
:param crf_data: crf data
:return:
"""
mask = torch.tensor([
[1, 1, 1],
[1, 1, 0]
], dtype=torch.long)
crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,
label_vocabulary=crf_data.label_vocabulary)
label_indices = crf_label_index_decoder(logits=crf_data.logits,
mask=mask)
padding_index = crf_data.label_vocabulary.padding_index
expect = [[2, 4, 3], [4, 2, padding_index]]
ASSERT.assertListEqual(expect, label_indices.tolist())
def test_crf_label_index_decoder_with_constraint(crf_data):
mask = torch.tensor([
[1, 1, 1],
[1, 1, 0]
], dtype=torch.uint8)
crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.constraint_crf,
label_vocabulary=crf_data.label_vocabulary)
label_indices = crf_label_index_decoder(logits=crf_data.logits,
mask=mask)
padding_index = crf_data.label_vocabulary.padding_index
expect = [[2, 3, 3], [2, 3, padding_index]]
ASSERT.assertListEqual(expect, label_indices.tolist())
|
normal
|
{
"blob_id": "f64138ee5a64f09deb72b47b86bd7795acddad4d",
"index": 9980,
"step-1": "<mask token>\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\n@pytest.fixture(scope='class')\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.long)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\n@pytest.fixture(scope='class')\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.long)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\ndef test_crf_label_index_decoder_with_constraint(crf_data):\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.\n constraint_crf, label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 3, 3], [2, 3, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n",
"step-4": "<mask token>\nimport pytest\nimport torch\nfrom easytext.tests import ASSERT\nfrom easytext.data import LabelVocabulary\nfrom easytext.modules import ConditionalRandomField\nfrom easytext.label_decoder import CRFLabelIndexDecoder\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [['O', 'I-X', 'B-X', 'I-Y', 'B-Y']]\n self.label_vocabulary = LabelVocabulary(labels=bio_labels, padding=\n LabelVocabulary.PADDING)\n self.logits = torch.tensor([[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3,\n 0.1], [0, 0, 0.9, 10, 1]], [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, \n 0.3, 0.1], [0, 0, 0.9, 1, 1]]], dtype=torch.float)\n self.tags = torch.tensor([[2, 3, 4], [3, 2, 2]], dtype=torch.long)\n self.transitions = torch.tensor([[0.1, 0.2, 0.3, 0.4, 0.5], [0.8, \n 0.3, 0.1, 0.7, 0.9], [-0.3, 2.1, -5.6, 3.4, 4.0], [0.2, 0.4, \n 0.6, -0.3, -0.4], [1.0, 1.0, 1.0, 1.0, 1.0]], dtype=torch.float)\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6\n ], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4\n ], dtype=torch.float)\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n constraints = {(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, \n 3), (3, 4), (4, 4), (4, 0)}\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=\n constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.\n transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.\n transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\n@pytest.fixture(scope='class')\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.long)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\ndef test_crf_label_index_decoder_with_constraint(crf_data):\n mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.\n constraint_crf, label_vocabulary=crf_data.label_vocabulary)\n label_indices = crf_label_index_decoder(logits=crf_data.logits, mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 3, 3], [2, 3, padding_index]]\n ASSERT.assertListEqual(expect, label_indices.tolist())\n",
"step-5": "#!/usr/bin/env python 3\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) 2020 PanXu, Inc. All Rights Reserved\n#\n\"\"\"\n测试 label index decoder\n\nAuthors: PanXu\nDate: 2020/07/05 15:10:00\n\"\"\"\nimport pytest\nimport torch\n\nfrom easytext.tests import ASSERT\n\nfrom easytext.data import LabelVocabulary\nfrom easytext.modules import ConditionalRandomField\nfrom easytext.label_decoder import CRFLabelIndexDecoder\n\n\nclass CRFData:\n \"\"\"\n 测试用的 crf 数据\n \"\"\"\n\n def __init__(self):\n bio_labels = [[\"O\", \"I-X\", \"B-X\", \"I-Y\", \"B-Y\"]]\n\n self.label_vocabulary = LabelVocabulary(labels=bio_labels,\n padding=LabelVocabulary.PADDING)\n\n self.logits = torch.tensor([\n [[0, 0, .5, .5, .2], [0, 0, .3, .3, .1], [0, 0, .9, 10, 1]],\n [[0, 0, .2, .5, .2], [0, 0, 3, .3, .1], [0, 0, .9, 1, 1]],\n ], dtype=torch.float)\n\n self.tags = torch.tensor([\n [2, 3, 4],\n [3, 2, 2]\n ], dtype=torch.long)\n\n self.transitions = torch.tensor([\n [0.1, 0.2, 0.3, 0.4, 0.5],\n [0.8, 0.3, 0.1, 0.7, 0.9],\n [-0.3, 2.1, -5.6, 3.4, 4.0],\n [0.2, 0.4, 0.6, -0.3, -0.4],\n [1.0, 1.0, 1.0, 1.0, 1.0]\n ], dtype=torch.float)\n\n self.transitions_from_start = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.6], dtype=torch.float)\n self.transitions_to_end = torch.tensor([-0.1, -0.2, 0.3, -0.4, -0.4], dtype=torch.float)\n\n # Use the CRF Module with fixed transitions to compute the log_likelihood\n self.crf = ConditionalRandomField(5)\n self.crf.transitions = torch.nn.Parameter(self.transitions)\n self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)\n self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n\n # constraint crf\n constraints = {(0, 0), (0, 1),\n (1, 1), (1, 2),\n (2, 2), (2, 3),\n (3, 3), (3, 4),\n (4, 4), (4, 0)}\n\n # Add the transitions to the end tag\n # and from the start tag.\n for i in range(5):\n constraints.add((5, i))\n constraints.add((i, 6))\n\n constraint_crf = ConditionalRandomField(num_tags=5, constraints=constraints)\n constraint_crf.transitions = torch.nn.Parameter(self.transitions)\n constraint_crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)\n constraint_crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)\n self.constraint_crf = constraint_crf\n\n\n@pytest.fixture(scope=\"class\")\ndef crf_data():\n \"\"\"\n 产生测试用的 crf data\n :return:\n \"\"\"\n return CRFData()\n\n\ndef test_crf_label_index_decoder(crf_data):\n \"\"\"\n 测试 crf label index decoder\n :param crf_data: crf data\n :return:\n \"\"\"\n mask = torch.tensor([\n [1, 1, 1],\n [1, 1, 0]\n ], dtype=torch.long)\n\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.crf,\n label_vocabulary=crf_data.label_vocabulary)\n\n label_indices = crf_label_index_decoder(logits=crf_data.logits,\n mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 4, 3], [4, 2, padding_index]]\n\n ASSERT.assertListEqual(expect, label_indices.tolist())\n\n\ndef test_crf_label_index_decoder_with_constraint(crf_data):\n mask = torch.tensor([\n [1, 1, 1],\n [1, 1, 0]\n ], dtype=torch.uint8)\n\n crf_label_index_decoder = CRFLabelIndexDecoder(crf=crf_data.constraint_crf,\n label_vocabulary=crf_data.label_vocabulary)\n\n label_indices = crf_label_index_decoder(logits=crf_data.logits,\n mask=mask)\n padding_index = crf_data.label_vocabulary.padding_index\n expect = [[2, 3, 3], [2, 3, padding_index]]\n\n ASSERT.assertListEqual(expect, label_indices.tolist())\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
default_app_config = 'teacher.apps.A1Config'
|
normal
|
{
"blob_id": "c466c7e05608b1fbba5eea5bec16d301cee3688f",
"index": 9817,
"step-1": "<mask token>\n",
"step-2": "default_app_config = 'teacher.apps.A1Config'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
<|reserved_special_token_1|>
import os
import sys
import json
import logging
import argparse
from glob import glob
from pricewatcher.tools import ensure_mkdir
from pricewatcher.parser.f21 import ForeverParser
from pricewatcher.parser.jcrew import JcrewParser
from pricewatcher.utils.load_es import bulk_load_es
BRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(
), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help=
'default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help=
'remove index before loading new data')
args = parser.parse_args()
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,
'*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = (file_path
.split('/')[-6:])
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),
file_path))
if not load_es:
output_dir = os.path.join(output_base, os.path.join(dt_str,
hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({'index': {'_index': br, '_type':
category, '_id': doc['product_id']}})
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list,
opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
<|reserved_special_token_1|>
import os
import sys
import json
import logging
import argparse
from glob import glob
from pricewatcher.tools import ensure_mkdir
from pricewatcher.parser.f21 import ForeverParser
from pricewatcher.parser.jcrew import JcrewParser
from pricewatcher.utils.load_es import bulk_load_es
BRAND_PARSERS={
'forever21': ForeverParser,
'jcrew': JcrewParser
}
# Set up logging
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help='default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data')
args = parser.parse_args()
# Argument parsing
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
# ES arguments
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
# Parsing Raw Pages
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:]
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path))
if not load_es:
# Output Result
output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
# Dump Product List
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
#es_index, es_doctype = br, category
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({ "index" : { "_index" : br, "_type" : category, "_id" : doc['product_id'] } })
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
|
flexible
|
{
"blob_id": "2c22f891f30825bcb97987c78a98988ad2a92210",
"index": 385,
"step-1": "<mask token>\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-2": "<mask token>\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-3": "<mask token>\nBRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-4": "import os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom glob import glob\nfrom pricewatcher.tools import ensure_mkdir\nfrom pricewatcher.parser.f21 import ForeverParser\nfrom pricewatcher.parser.jcrew import JcrewParser\nfrom pricewatcher.utils.load_es import bulk_load_es\nBRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-5": "import os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom glob import glob\n\nfrom pricewatcher.tools import ensure_mkdir\nfrom pricewatcher.parser.f21 import ForeverParser\nfrom pricewatcher.parser.jcrew import JcrewParser\nfrom pricewatcher.utils.load_es import bulk_load_es\n\nBRAND_PARSERS={\n'forever21': ForeverParser, \n'jcrew': JcrewParser\n}\n\n# Set up logging\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help='default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200') \n parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data')\n args = parser.parse_args()\n\n # Argument parsing\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n\n # ES arguments\n es_host, es_port = args.es_host, args.es_port \n load_es = args.load_es\n\n # Parsing Raw Pages\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*')) \n for file_path in input_files: \n dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:] \n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path))\n if not load_es: \n # Output Result \n output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json') \n logging.info('[WRITE] output to %s' % output_path)\n # Dump Product List\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n #es_index, es_doctype = br, category \n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({ \"index\" : { \"_index\" : br, \"_type\" : category, \"_id\" : doc['product_id'] } })\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_sgd_optimizer(args, model):
opimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay
=0.0001)
return opimizer
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import torch
def get_sgd_optimizer(args, model):
opimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay
=0.0001)
return opimizer
<|reserved_special_token_1|>
#!/usr/bin/python3
#coding:utf-8
"""
Author: Xie Song
Email: 18406508513@163.com
Copyright: Xie Song
License: MIT
"""
import torch
def get_sgd_optimizer(args, model):
opimizer = torch.optim.SGD(model.parameters(),lr=args.lr,weight_decay=1e-4)
return opimizer
|
flexible
|
{
"blob_id": "5dca187cfe221f31189ca9a9309ece4b9144ac66",
"index": 2812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_sgd_optimizer(args, model):\n opimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay\n =0.0001)\n return opimizer\n",
"step-3": "<mask token>\nimport torch\n\n\ndef get_sgd_optimizer(args, model):\n opimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay\n =0.0001)\n return opimizer\n",
"step-4": "#!/usr/bin/python3\n#coding:utf-8\n\n\"\"\"\n Author: Xie Song\n Email: 18406508513@163.com\n \n Copyright: Xie Song\n License: MIT\n\"\"\"\nimport torch\n\ndef get_sgd_optimizer(args, model):\n opimizer = torch.optim.SGD(model.parameters(),lr=args.lr,weight_decay=1e-4)\n return opimizer",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpecValidator:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpecValidator:
def __init__(self, type=None, default=None, choices=[], min=None, max=None
):
self.type = type
self.default = default
self.choices = choices
self.min = min
self.max = max
<|reserved_special_token_0|>
<|reserved_special_token_1|>
AWSGlobalInstanceChoices = ['t2.nano', 't2.micro', 't3.nano', 't3.micro',
't3a.nano', 't3a.micro']
class SpecValidator:
def __init__(self, type=None, default=None, choices=[], min=None, max=None
):
self.type = type
self.default = default
self.choices = choices
self.min = min
self.max = max
DefaultAWSSpec = {'available_os': {'CentOS7': {'image': SpecValidator(type=
'string', default='CentOS Linux 7 x86_64 HVM EBS*'), 'ssh_user':
SpecValidator(type='choice', choices=['centos'], default='centos')},
'RedHat7': {'image': SpecValidator(type='string', default=
'RHEL-7.8-x86_64*'), 'ssh_user': SpecValidator(type='choice', choices=[
'ec2-user'], default='ec2-user')}, 'RedHat8': {'image': SpecValidator(
type='string', default='RHEL-8.2-x86_64*'), 'ssh_user': SpecValidator(
type='choice', choices=['ec2-user'], default='ec2-user')},
'RockyLinux8': {'image': SpecValidator(type='string', default=
'Rocky-8-ec2-8.5-20211114.2.x86_64'), 'ssh_user': SpecValidator(type=
'choice', choices=['rocky'], default='rocky')}}, 'dbt2': SpecValidator(
type='choice', choices=[True, False], default=False), 'dbt2_client': {
'count': SpecValidator(type='integer', min=0, max=64, default=0),
'instance_type': SpecValidator(type='choice', choices=['m5n.xlarge',
'm5n.2xlarge', 'm5n.4xlarge'] + AWSGlobalInstanceChoices, default=
'm5n.xlarge'), 'volume': {'type': SpecValidator(type='choice', choices=
['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'), 'size':
SpecValidator(type='integer', min=10, max=16000, default=50), 'iops':
SpecValidator(type='integer', min=100, max=64000, default=250)}},
'dbt2_driver': {'count': SpecValidator(type='integer', min=0, max=64,
default=0), 'instance_type': SpecValidator(type='choice', choices=[
'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'] + AWSGlobalInstanceChoices,
default='m5n.xlarge'), 'volume': {'type': SpecValidator(type='choice',
choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'),
'size': SpecValidator(type='integer', min=10, max=16000, default=50),
'iops': SpecValidator(type='integer', min=100, max=64000, default=250)}
}, 'hammerdb_server': {'instance_type': SpecValidator(type='choice',
choices=['m5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'] +
AWSGlobalInstanceChoices, default='m5n.xlarge'), 'volume': {'type':
SpecValidator(type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1',
'sc1'], default='gp2'), 'size': SpecValidator(type='integer', min=10,
max=16000, default=50), 'iops': SpecValidator(type='integer', min=100,
max=64000, default=250)}}, 'pem_server': {'instance_type':
SpecValidator(type='choice', choices=['c5.large', 'c5.xlarge',
'c5.2xlarge', 'c5.4xlarge', 'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge',
'c5.24xlarge', 'c5.metal'] + AWSGlobalInstanceChoices, default=
'c5.xlarge'), 'volume': {'type': SpecValidator(type='choice', choices=[
'io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'), 'size':
SpecValidator(type='integer', min=10, max=16000, default=100), 'iops':
SpecValidator(type='integer', min=100, max=64000, default=250)}}}
DefaultAzureSpec = {'available_os': {'CentOS7': {'publisher': SpecValidator
(type='string', default='OpenLogic'), 'offer': SpecValidator(type=
'string', default='CentOS'), 'sku': SpecValidator(type='string',
default='7.7'), 'ssh_user': SpecValidator(type='string', default=
'edbadm')}, 'RedHat7': {'publisher': SpecValidator(type='string',
default='RedHat'), 'offer': SpecValidator(type='string', default='RHEL'
), 'sku': SpecValidator(type='string', default='7.8'), 'ssh_user':
SpecValidator(type='string', default='edbadm')}, 'RedHat8': {
'publisher': SpecValidator(type='string', default='RedHat'), 'offer':
SpecValidator(type='string', default='RHEL'), 'sku': SpecValidator(type
='string', default='8.2'), 'ssh_user': SpecValidator(type='string',
default='edbadm')}, 'RockyLinux8': {'publisher': SpecValidator(type=
'string', default='Perforce'), 'offer': SpecValidator(type='string',
default='rockylinux8'), 'sku': SpecValidator(type='string', default='8'
), 'ssh_user': SpecValidator(type='string', default='rocky')}}, 'dbt2':
SpecValidator(type='choice', choices=[True, False], default=False),
'dbt2_driver': {'count': SpecValidator(type='integer', min=0, max=64,
default=0), 'instance_type': SpecValidator(type='choice', choices=[
'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',
'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=
'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(
type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',
'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},
'dbt2_client': {'count': SpecValidator(type='integer', min=0, max=64,
default=0), 'instance_type': SpecValidator(type='choice', choices=[
'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',
'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=
'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(
type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',
'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},
'pem_server': {'instance_type': SpecValidator(type='choice', choices=[
'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',
'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=
'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(
type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',
'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},
'hammerdb_server': {'instance_type': SpecValidator(type='choice',
choices=['Standard_D4ds_v4', 'Standard_D8ds_v4'], default=
'Standard_D4ds_v4'), 'volume': {'storage_account_type': SpecValidator(
type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',
'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')},
'additional_volumes': {'count': SpecValidator(type='integer', min=0,
max=5, default=2), 'storage_account_type': SpecValidator(type='choice',
choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
'UltraSSD_LRS'], default='StandardSSD_LRS'), 'size': SpecValidator(type
='integer', min=10, max=16000, default=100)}}}
DefaultGcloudSpec = {'available_os': {'CentOS7': {'image': SpecValidator(
type='string', default='centos-7'), 'ssh_user': SpecValidator(type=
'string', default='edbadm')}, 'RedHat7': {'image': SpecValidator(type=
'string', default='rhel-7'), 'ssh_user': SpecValidator(type='string',
default='edbadm')}, 'RedHat8': {'image': SpecValidator(type='string',
default='rhel-8'), 'ssh_user': SpecValidator(type='string', default=
'edbadm')}, 'RockyLinux8': {'image': SpecValidator(type='string',
default='rocky-linux-8'), 'ssh_user': SpecValidator(type='string',
default='rocky')}}, 'dbt2': SpecValidator(type='choice', choices=[True,
False], default=False), 'dbt2_client': {'count': SpecValidator(type=
'integer', min=0, max=64, default=0), 'instance_type': SpecValidator(
type='choice', choices=['c2-standard-4', 'c2-standard-8',
'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':
SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default
='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=
16000, default=50)}}, 'dbt2_driver': {'count': SpecValidator(type=
'integer', min=0, max=64, default=0), 'instance_type': SpecValidator(
type='choice', choices=['c2-standard-4', 'c2-standard-8',
'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':
SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default
='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=
16000, default=50)}}, 'hammerdb_server': {'instance_type':
SpecValidator(type='choice', choices=['c2-standard-4', 'c2-standard-8',
'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':
SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default
='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=
16000, default=50)}, 'additional_volumes': {'count': SpecValidator(type
='integer', min=0, max=5, default=2), 'type': SpecValidator(type=
'choice', choices=['pd-standard', 'pd-ssd'], default='pd-ssd'), 'size':
SpecValidator(type='integer', min=10, max=65536, default=100)}},
'pem_server': {'instance_type': SpecValidator(type='choice', choices=[
'e2-standard-2', 'e2-standard-4', 'e2-standard-8', 'e2-standard-16',
'e2-standard-32', 'e2-highmem-2', 'e2-highmem-4', 'e2-highmem-8',
'e2-highmem-16'], default='e2-standard-4'), 'volume': {'type':
SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default
='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=
65536, default=100)}}}
<|reserved_special_token_1|>
# These are instance types to make available to all AWS EC2 systems, except the
# PostgreSQL server, until the auto tuning playbook can tune for systems that
# small.
AWSGlobalInstanceChoices = [
't2.nano', 't2.micro',
't3.nano', 't3.micro',
't3a.nano', 't3a.micro',
]
class SpecValidator:
def __init__(self, type=None, default=None, choices=[], min=None,
max=None):
self.type = type
self.default = default
self.choices = choices
self.min = min
self.max = max
DefaultAWSSpec = {
'available_os': {
'CentOS7': {
'image': SpecValidator(
type='string',
default="CentOS Linux 7 x86_64 HVM EBS*"
),
'ssh_user': SpecValidator(
type='choice',
choices=['centos'],
default='centos'
)
},
'RedHat7': {
'image': SpecValidator(
type='string',
default="RHEL-7.8-x86_64*"
),
'ssh_user': SpecValidator(
type='choice',
choices=['ec2-user'],
default='ec2-user'
)
},
'RedHat8': {
'image': SpecValidator(
type='string',
default="RHEL-8.2-x86_64*"
),
'ssh_user': SpecValidator(
type='choice',
choices=['ec2-user'],
default='ec2-user'
)
},
'RockyLinux8': {
'image': SpecValidator(
type='string',
default="Rocky-8-ec2-8.5-20211114.2.x86_64"
),
'ssh_user': SpecValidator(
type='choice',
choices=['rocky'],
default='rocky'
)
}
},
'dbt2': SpecValidator(
type='choice',
choices=[True, False],
default=False
),
'dbt2_client': {
'count': SpecValidator(
type='integer',
min=0,
max=64,
default=0
),
'instance_type': SpecValidator(
type='choice',
choices=[
'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'
] + AWSGlobalInstanceChoices,
default='m5n.xlarge'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
default='gp2'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=50
),
'iops': SpecValidator(
type='integer',
min=100,
max=64000,
default=250
)
},
},
'dbt2_driver': {
'count': SpecValidator(
type='integer',
min=0,
max=64,
default=0
),
'instance_type': SpecValidator(
type='choice',
choices=[
'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'
] + AWSGlobalInstanceChoices,
default='m5n.xlarge'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
default='gp2'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=50
),
'iops': SpecValidator(
type='integer',
min=100,
max=64000,
default=250
)
},
},
'hammerdb_server': {
'instance_type': SpecValidator(
type='choice',
choices=[
'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'
] + AWSGlobalInstanceChoices,
default='m5n.xlarge'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
default='gp2'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=50
),
'iops': SpecValidator(
type='integer',
min=100,
max=64000,
default=250
)
},
},
'pem_server': {
'instance_type': SpecValidator(
type='choice',
choices=[
'c5.large', 'c5.xlarge', 'c5.2xlarge', 'c5.4xlarge',
'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge', 'c5.24xlarge',
'c5.metal'
] + AWSGlobalInstanceChoices,
default='c5.xlarge'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
default='gp2'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=100
),
'iops': SpecValidator(
type='integer',
min=100,
max=64000,
default=250
)
}
}
}
DefaultAzureSpec = {
'available_os': {
'CentOS7': {
'publisher': SpecValidator(type='string', default="OpenLogic"),
'offer': SpecValidator(type='string', default="CentOS"),
'sku': SpecValidator(type='string', default="7.7"),
'ssh_user': SpecValidator(type='string', default='edbadm')
},
'RedHat7': {
'publisher': SpecValidator(type='string', default="RedHat"),
'offer': SpecValidator(type='string', default="RHEL"),
'sku': SpecValidator(type='string', default="7.8"),
'ssh_user': SpecValidator(type='string', default='edbadm')
},
'RedHat8': {
'publisher': SpecValidator(type='string', default="RedHat"),
'offer': SpecValidator(type='string', default="RHEL"),
'sku': SpecValidator(type='string', default="8.2"),
'ssh_user': SpecValidator(type='string', default='edbadm')
},
'RockyLinux8': {
'publisher': SpecValidator(type='string', default="Perforce"),
'offer': SpecValidator(type='string', default="rockylinux8"),
'sku': SpecValidator(type='string', default="8"),
'ssh_user': SpecValidator(type='string', default='rocky')
}
},
'dbt2': SpecValidator(
type='choice',
choices=[True, False],
default=False
),
'dbt2_driver': {
'count': SpecValidator(
type='integer',
min=0,
max=64,
default=0
),
'instance_type': SpecValidator(
type='choice',
choices=[
'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
'Standard_A8m_v2'
],
default='Standard_A2_v2'
),
'volume': {
'storage_account_type': SpecValidator(
type='choice',
choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
'UltraSSD_LRS'],
default='Standard_LRS'
)
}
},
'dbt2_client': {
'count': SpecValidator(
type='integer',
min=0,
max=64,
default=0
),
'instance_type': SpecValidator(
type='choice',
choices=[
'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
'Standard_A8m_v2'
],
default='Standard_A2_v2'
),
'volume': {
'storage_account_type': SpecValidator(
type='choice',
choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
'UltraSSD_LRS'],
default='Standard_LRS'
)
}
},
'pem_server': {
'instance_type': SpecValidator(
type='choice',
choices=[
'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
'Standard_A8m_v2'
],
default='Standard_A2_v2'
),
'volume': {
'storage_account_type': SpecValidator(
type='choice',
choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
'UltraSSD_LRS'],
default='Standard_LRS'
)
}
},
'hammerdb_server': {
'instance_type': SpecValidator(
type='choice',
choices=[
'Standard_D4ds_v4', 'Standard_D8ds_v4'
],
default='Standard_D4ds_v4'
),
'volume': {
'storage_account_type': SpecValidator(
type='choice',
choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
'UltraSSD_LRS'],
default='Standard_LRS'
)
},
'additional_volumes': {
'count': SpecValidator(
type='integer',
min=0,
max=5,
default=2
),
'storage_account_type': SpecValidator(
type='choice',
choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
'UltraSSD_LRS'],
default='StandardSSD_LRS'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=100
)
}
}
}
DefaultGcloudSpec = {
'available_os': {
'CentOS7': {
'image': SpecValidator(type='string', default="centos-7"),
'ssh_user': SpecValidator(type='string', default='edbadm')
},
'RedHat7': {
'image': SpecValidator(type='string', default="rhel-7"),
'ssh_user': SpecValidator(type='string', default='edbadm')
},
'RedHat8': {
'image': SpecValidator(type='string', default="rhel-8"),
'ssh_user': SpecValidator(type='string', default='edbadm')
},
'RockyLinux8': {
'image': SpecValidator(type='string', default="rocky-linux-8"),
'ssh_user': SpecValidator(type='string', default='rocky')
}
},
'dbt2': SpecValidator(
type='choice',
choices=[True, False],
default=False
),
'dbt2_client': {
'count': SpecValidator(
type='integer',
min=0,
max=64,
default=0
),
'instance_type': SpecValidator(
type='choice',
choices=[
'c2-standard-4', 'c2-standard-8', 'c2-standard-16'
],
default='c2-standard-4'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['pd-standard', 'pd-ssd'],
default='pd-standard'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=50
)
}
},
'dbt2_driver': {
'count': SpecValidator(
type='integer',
min=0,
max=64,
default=0
),
'instance_type': SpecValidator(
type='choice',
choices=[
'c2-standard-4', 'c2-standard-8', 'c2-standard-16'
],
default='c2-standard-4'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['pd-standard', 'pd-ssd'],
default='pd-standard'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=50
)
}
},
'hammerdb_server': {
'instance_type': SpecValidator(
type='choice',
choices=[
'c2-standard-4', 'c2-standard-8', 'c2-standard-16'
],
default='c2-standard-4'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['pd-standard', 'pd-ssd'],
default='pd-standard'
),
'size': SpecValidator(
type='integer',
min=10,
max=16000,
default=50
)
},
'additional_volumes': {
'count': SpecValidator(
type='integer',
min=0,
max=5,
default=2
),
'type': SpecValidator(
type='choice',
choices=['pd-standard', 'pd-ssd'],
default='pd-ssd'
),
'size': SpecValidator(
type='integer',
min=10,
max=65536,
default=100
)
}
},
'pem_server': {
'instance_type': SpecValidator(
type='choice',
choices=[
'e2-standard-2', 'e2-standard-4', 'e2-standard-8',
'e2-standard-16', 'e2-standard-32', 'e2-highmem-2',
'e2-highmem-4', 'e2-highmem-8', 'e2-highmem-16'
],
default='e2-standard-4'
),
'volume': {
'type': SpecValidator(
type='choice',
choices=['pd-standard', 'pd-ssd'],
default='pd-standard'
),
'size': SpecValidator(
type='integer',
min=10,
max=65536,
default=100
)
}
}
}
|
flexible
|
{
"blob_id": "4db93bdab2d73e7226dcad61827f5faea8513767",
"index": 9888,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpecValidator:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SpecValidator:\n\n def __init__(self, type=None, default=None, choices=[], min=None, max=None\n ):\n self.type = type\n self.default = default\n self.choices = choices\n self.min = min\n self.max = max\n\n\n<mask token>\n",
"step-4": "AWSGlobalInstanceChoices = ['t2.nano', 't2.micro', 't3.nano', 't3.micro',\n 't3a.nano', 't3a.micro']\n\n\nclass SpecValidator:\n\n def __init__(self, type=None, default=None, choices=[], min=None, max=None\n ):\n self.type = type\n self.default = default\n self.choices = choices\n self.min = min\n self.max = max\n\n\nDefaultAWSSpec = {'available_os': {'CentOS7': {'image': SpecValidator(type=\n 'string', default='CentOS Linux 7 x86_64 HVM EBS*'), 'ssh_user':\n SpecValidator(type='choice', choices=['centos'], default='centos')},\n 'RedHat7': {'image': SpecValidator(type='string', default=\n 'RHEL-7.8-x86_64*'), 'ssh_user': SpecValidator(type='choice', choices=[\n 'ec2-user'], default='ec2-user')}, 'RedHat8': {'image': SpecValidator(\n type='string', default='RHEL-8.2-x86_64*'), 'ssh_user': SpecValidator(\n type='choice', choices=['ec2-user'], default='ec2-user')},\n 'RockyLinux8': {'image': SpecValidator(type='string', default=\n 'Rocky-8-ec2-8.5-20211114.2.x86_64'), 'ssh_user': SpecValidator(type=\n 'choice', choices=['rocky'], default='rocky')}}, 'dbt2': SpecValidator(\n type='choice', choices=[True, False], default=False), 'dbt2_client': {\n 'count': SpecValidator(type='integer', min=0, max=64, default=0),\n 'instance_type': SpecValidator(type='choice', choices=['m5n.xlarge',\n 'm5n.2xlarge', 'm5n.4xlarge'] + AWSGlobalInstanceChoices, default=\n 'm5n.xlarge'), 'volume': {'type': SpecValidator(type='choice', choices=\n ['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'), 'size':\n SpecValidator(type='integer', min=10, max=16000, default=50), 'iops':\n SpecValidator(type='integer', min=100, max=64000, default=250)}},\n 'dbt2_driver': {'count': SpecValidator(type='integer', min=0, max=64,\n default=0), 'instance_type': SpecValidator(type='choice', choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'), 'volume': {'type': SpecValidator(type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'),\n 'size': SpecValidator(type='integer', min=10, max=16000, default=50),\n 'iops': SpecValidator(type='integer', min=100, max=64000, default=250)}\n }, 'hammerdb_server': {'instance_type': SpecValidator(type='choice',\n choices=['m5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'] +\n AWSGlobalInstanceChoices, default='m5n.xlarge'), 'volume': {'type':\n SpecValidator(type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1',\n 'sc1'], default='gp2'), 'size': SpecValidator(type='integer', min=10,\n max=16000, default=50), 'iops': SpecValidator(type='integer', min=100,\n max=64000, default=250)}}, 'pem_server': {'instance_type':\n SpecValidator(type='choice', choices=['c5.large', 'c5.xlarge',\n 'c5.2xlarge', 'c5.4xlarge', 'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge',\n 'c5.24xlarge', 'c5.metal'] + AWSGlobalInstanceChoices, default=\n 'c5.xlarge'), 'volume': {'type': SpecValidator(type='choice', choices=[\n 'io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'), 'size':\n SpecValidator(type='integer', min=10, max=16000, default=100), 'iops':\n SpecValidator(type='integer', min=100, max=64000, default=250)}}}\nDefaultAzureSpec = {'available_os': {'CentOS7': {'publisher': SpecValidator\n (type='string', default='OpenLogic'), 'offer': SpecValidator(type=\n 'string', default='CentOS'), 'sku': SpecValidator(type='string',\n default='7.7'), 'ssh_user': SpecValidator(type='string', default=\n 'edbadm')}, 'RedHat7': {'publisher': SpecValidator(type='string',\n default='RedHat'), 'offer': SpecValidator(type='string', 
default='RHEL'\n ), 'sku': SpecValidator(type='string', default='7.8'), 'ssh_user':\n SpecValidator(type='string', default='edbadm')}, 'RedHat8': {\n 'publisher': SpecValidator(type='string', default='RedHat'), 'offer':\n SpecValidator(type='string', default='RHEL'), 'sku': SpecValidator(type\n ='string', default='8.2'), 'ssh_user': SpecValidator(type='string',\n default='edbadm')}, 'RockyLinux8': {'publisher': SpecValidator(type=\n 'string', default='Perforce'), 'offer': SpecValidator(type='string',\n default='rockylinux8'), 'sku': SpecValidator(type='string', default='8'\n ), 'ssh_user': SpecValidator(type='string', default='rocky')}}, 'dbt2':\n SpecValidator(type='choice', choices=[True, False], default=False),\n 'dbt2_driver': {'count': SpecValidator(type='integer', min=0, max=64,\n default=0), 'instance_type': SpecValidator(type='choice', choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',\n 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=\n 'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},\n 'dbt2_client': {'count': SpecValidator(type='integer', min=0, max=64,\n default=0), 'instance_type': SpecValidator(type='choice', choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',\n 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=\n 'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},\n 'pem_server': {'instance_type': SpecValidator(type='choice', choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',\n 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=\n 'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},\n 'hammerdb_server': {'instance_type': SpecValidator(type='choice',\n choices=['Standard_D4ds_v4', 'Standard_D8ds_v4'], default=\n 'Standard_D4ds_v4'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')},\n 'additional_volumes': {'count': SpecValidator(type='integer', min=0,\n max=5, default=2), 'storage_account_type': SpecValidator(type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'], default='StandardSSD_LRS'), 'size': SpecValidator(type\n ='integer', min=10, max=16000, default=100)}}}\nDefaultGcloudSpec = {'available_os': {'CentOS7': {'image': SpecValidator(\n type='string', default='centos-7'), 'ssh_user': SpecValidator(type=\n 'string', default='edbadm')}, 'RedHat7': {'image': SpecValidator(type=\n 'string', default='rhel-7'), 'ssh_user': SpecValidator(type='string',\n default='edbadm')}, 'RedHat8': {'image': SpecValidator(type='string',\n default='rhel-8'), 'ssh_user': SpecValidator(type='string', default=\n 'edbadm')}, 'RockyLinux8': {'image': SpecValidator(type='string',\n default='rocky-linux-8'), 'ssh_user': SpecValidator(type='string',\n default='rocky')}}, 'dbt2': SpecValidator(type='choice', choices=[True,\n False], default=False), 'dbt2_client': {'count': SpecValidator(type=\n 'integer', min=0, max=64, default=0), 'instance_type': SpecValidator(\n type='choice', 
choices=['c2-standard-4', 'c2-standard-8',\n 'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 16000, default=50)}}, 'dbt2_driver': {'count': SpecValidator(type=\n 'integer', min=0, max=64, default=0), 'instance_type': SpecValidator(\n type='choice', choices=['c2-standard-4', 'c2-standard-8',\n 'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 16000, default=50)}}, 'hammerdb_server': {'instance_type':\n SpecValidator(type='choice', choices=['c2-standard-4', 'c2-standard-8',\n 'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 16000, default=50)}, 'additional_volumes': {'count': SpecValidator(type\n ='integer', min=0, max=5, default=2), 'type': SpecValidator(type=\n 'choice', choices=['pd-standard', 'pd-ssd'], default='pd-ssd'), 'size':\n SpecValidator(type='integer', min=10, max=65536, default=100)}},\n 'pem_server': {'instance_type': SpecValidator(type='choice', choices=[\n 'e2-standard-2', 'e2-standard-4', 'e2-standard-8', 'e2-standard-16',\n 'e2-standard-32', 'e2-highmem-2', 'e2-highmem-4', 'e2-highmem-8',\n 'e2-highmem-16'], default='e2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 65536, default=100)}}}\n",
"step-5": "# These are instance types to make available to all AWS EC2 systems, except the .\n# PostgreSQL server, until the auto tuning playbook can tune for systems that\n# small.\nAWSGlobalInstanceChoices = [\n 't2.nano', 't2.micro',\n 't3.nano', 't3.micro',\n 't3a.nano', 't3a.micro',\n]\n\n\nclass SpecValidator:\n def __init__(self, type=None, default=None, choices=[], min=None,\n max=None):\n self.type = type\n self.default = default\n self.choices = choices\n self.min = min\n self.max = max\n\n\nDefaultAWSSpec = {\n 'available_os': {\n 'CentOS7': {\n 'image': SpecValidator(\n type='string',\n default=\"CentOS Linux 7 x86_64 HVM EBS*\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['centos'],\n default='centos'\n )\n },\n 'RedHat7': {\n 'image': SpecValidator(\n type='string',\n default=\"RHEL-7.8-x86_64*\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['ec2-user'],\n default='ec2-user'\n )\n },\n 'RedHat8': {\n 'image': SpecValidator(\n type='string',\n default=\"RHEL-8.2-x86_64*\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['ec2-user'],\n default='ec2-user'\n )\n },\n 'RockyLinux8': {\n 'image': SpecValidator(\n type='string',\n default=\"Rocky-8-ec2-8.5-20211114.2.x86_64\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['rocky'],\n default='rocky'\n )\n }\n\n },\n 'dbt2': SpecValidator(\n type='choice',\n choices=[True, False],\n default=False\n ),\n 'dbt2_client': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'\n ] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n },\n },\n 'dbt2_driver': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'\n ] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n },\n },\n 'hammerdb_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'\n ] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n },\n },\n 'pem_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c5.large', 'c5.xlarge', 'c5.2xlarge', 'c5.4xlarge',\n 'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge', 'c5.24xlarge',\n 'c5.metal'\n ] + AWSGlobalInstanceChoices,\n default='c5.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n 
max=16000,\n default=100\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n }\n }\n}\n\nDefaultAzureSpec = {\n 'available_os': {\n 'CentOS7': {\n 'publisher': SpecValidator(type='string', default=\"OpenLogic\"),\n 'offer': SpecValidator(type='string', default=\"CentOS\"),\n 'sku': SpecValidator(type='string', default=\"7.7\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat7': {\n 'publisher': SpecValidator(type='string', default=\"RedHat\"),\n 'offer': SpecValidator(type='string', default=\"RHEL\"),\n 'sku': SpecValidator(type='string', default=\"7.8\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat8': {\n 'publisher': SpecValidator(type='string', default=\"RedHat\"),\n 'offer': SpecValidator(type='string', default=\"RHEL\"),\n 'sku': SpecValidator(type='string', default=\"8.2\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RockyLinux8': {\n 'publisher': SpecValidator(type='string', default=\"Perforce\"),\n 'offer': SpecValidator(type='string', default=\"rockylinux8\"),\n 'sku': SpecValidator(type='string', default=\"8\"),\n 'ssh_user': SpecValidator(type='string', default='rocky')\n }\n },\n 'dbt2': SpecValidator(\n type='choice',\n choices=[True, False],\n default=False\n ),\n 'dbt2_driver': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',\n 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',\n 'Standard_A8m_v2'\n ],\n default='Standard_A2_v2'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n }\n },\n 'dbt2_client': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',\n 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',\n 'Standard_A8m_v2'\n ],\n default='Standard_A2_v2'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n }\n },\n 'pem_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',\n 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',\n 'Standard_A8m_v2'\n ],\n default='Standard_A2_v2'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n }\n },\n 'hammerdb_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_D4ds_v4', 'Standard_D8ds_v4'\n ],\n default='Standard_D4ds_v4'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n },\n 'additional_volumes': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=5,\n default=2\n ),\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='StandardSSD_LRS'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=100\n )\n }\n }\n}\n\nDefaultGcloudSpec = 
{\n 'available_os': {\n 'CentOS7': {\n 'image': SpecValidator(type='string', default=\"centos-7\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat7': {\n 'image': SpecValidator(type='string', default=\"rhel-7\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat8': {\n 'image': SpecValidator(type='string', default=\"rhel-8\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RockyLinux8': {\n 'image': SpecValidator(type='string', default=\"rocky-linux-8\"),\n 'ssh_user': SpecValidator(type='string', default='rocky')\n }\n },\n 'dbt2': SpecValidator(\n type='choice',\n choices=[True, False],\n default=False\n ),\n 'dbt2_client': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c2-standard-4', 'c2-standard-8', 'c2-standard-16'\n ],\n default='c2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n )\n }\n },\n 'dbt2_driver': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c2-standard-4', 'c2-standard-8', 'c2-standard-16'\n ],\n default='c2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n )\n }\n },\n 'hammerdb_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c2-standard-4', 'c2-standard-8', 'c2-standard-16'\n ],\n default='c2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n )\n },\n 'additional_volumes': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=5,\n default=2\n ),\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-ssd'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=65536,\n default=100\n )\n }\n },\n 'pem_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'e2-standard-2', 'e2-standard-4', 'e2-standard-8',\n 'e2-standard-16', 'e2-standard-32', 'e2-highmem-2',\n 'e2-highmem-4', 'e2-highmem-8', 'e2-highmem-16'\n ],\n default='e2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=65536,\n default=100\n )\n }\n }\n}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_lazymap():
data = list(range(10))
lm = LazyMap(data, lambda x: 2 * x)
assert len(lm) == 10
assert lm[1] == 2
assert isinstance(lm[1:4], LazyMap)
assert lm.append == data.append
assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_lazymap():
data = list(range(10))
lm = LazyMap(data, lambda x: 2 * x)
assert len(lm) == 10
assert lm[1] == 2
assert isinstance(lm[1:4], LazyMap)
assert lm.append == data.append
assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'
def test_lazymap_iter():
data = list(range(2))
lm = LazyMap(data, lambda x: 2 * x)
iter_lm = iter(lm)
assert iter_lm.next() == 0
assert iter_lm.next() == 2
with raises(StopIteration):
iter_lm.next()
<|reserved_special_token_1|>
from py.test import raises
from ..lazymap import LazyMap
def test_lazymap():
data = list(range(10))
lm = LazyMap(data, lambda x: 2 * x)
assert len(lm) == 10
assert lm[1] == 2
assert isinstance(lm[1:4], LazyMap)
assert lm.append == data.append
assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'
def test_lazymap_iter():
data = list(range(2))
lm = LazyMap(data, lambda x: 2 * x)
iter_lm = iter(lm)
assert iter_lm.next() == 0
assert iter_lm.next() == 2
with raises(StopIteration):
iter_lm.next()
|
flexible
|
{
"blob_id": "3e7d80fdd1adb570934e4b252bc25d5746b4c68e",
"index": 3912,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_lazymap():\n data = list(range(10))\n lm = LazyMap(data, lambda x: 2 * x)\n assert len(lm) == 10\n assert lm[1] == 2\n assert isinstance(lm[1:4], LazyMap)\n assert lm.append == data.append\n assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_lazymap():\n data = list(range(10))\n lm = LazyMap(data, lambda x: 2 * x)\n assert len(lm) == 10\n assert lm[1] == 2\n assert isinstance(lm[1:4], LazyMap)\n assert lm.append == data.append\n assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'\n\n\ndef test_lazymap_iter():\n data = list(range(2))\n lm = LazyMap(data, lambda x: 2 * x)\n iter_lm = iter(lm)\n assert iter_lm.next() == 0\n assert iter_lm.next() == 2\n with raises(StopIteration):\n iter_lm.next()\n",
"step-4": "from py.test import raises\nfrom ..lazymap import LazyMap\n\n\ndef test_lazymap():\n data = list(range(10))\n lm = LazyMap(data, lambda x: 2 * x)\n assert len(lm) == 10\n assert lm[1] == 2\n assert isinstance(lm[1:4], LazyMap)\n assert lm.append == data.append\n assert repr(lm) == '<LazyMap [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]>'\n\n\ndef test_lazymap_iter():\n data = list(range(2))\n lm = LazyMap(data, lambda x: 2 * x)\n iter_lm = iter(lm)\n assert iter_lm.next() == 0\n assert iter_lm.next() == 2\n with raises(StopIteration):\n iter_lm.next()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print('SYL_2整型数组_12 合并排序数组')
|
normal
|
{
"blob_id": "571636be9d213d19bddfd1d04688bc0955c9eae5",
"index": 4427,
"step-1": "<mask token>\n",
"step-2": "print('SYL_2整型数组_12 合并排序数组')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from eventnotipy import app
import json
json_data = open('eventnotipy/config.json')
data = json.load(json_data)
json_data.close()
username = data['dbuser']
password = data['password']
host = data['dbhost']
db_name = data['database']
email_host = data['email_host']
email_localhost = data['email_localhost']
sms_host = data['sms_host']
sms_localhost = data['sms_localhost']
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (
username, password, host, db_name)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = False
app.secret_key = data['session_key']
|
normal
|
{
"blob_id": "1f0680c45afb36439c56a1d202537261df5f9afc",
"index": 5895,
"step-1": "<mask token>\n",
"step-2": "<mask token>\njson_data.close()\n<mask token>\n",
"step-3": "<mask token>\njson_data = open('eventnotipy/config.json')\ndata = json.load(json_data)\njson_data.close()\nusername = data['dbuser']\npassword = data['password']\nhost = data['dbhost']\ndb_name = data['database']\nemail_host = data['email_host']\nemail_localhost = data['email_localhost']\nsms_host = data['sms_host']\nsms_localhost = data['sms_localhost']\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (\n username, password, host, db_name)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = False\napp.secret_key = data['session_key']\n",
"step-4": "from eventnotipy import app\nimport json\njson_data = open('eventnotipy/config.json')\ndata = json.load(json_data)\njson_data.close()\nusername = data['dbuser']\npassword = data['password']\nhost = data['dbhost']\ndb_name = data['database']\nemail_host = data['email_host']\nemail_localhost = data['email_localhost']\nsms_host = data['sms_host']\nsms_localhost = data['sms_localhost']\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (\n username, password, host, db_name)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = False\napp.secret_key = data['session_key']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='SumoSound',
packages=['SumoSound'],
version='1.0.2',
license='MIT',
description='A python library to add 3D sound to a Sumo traffic simulation.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Patrick Malcolm',
author_email='patmalcolm91@gmail.com',
url='https://github.com/patmalcolm91/SumoSound',
download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],
install_requires=[
'pyopenal',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
package_data={'SumoSound': ['stock_sounds/*.wav']}
)
|
normal
|
{
"blob_id": "81c9cabaa611f8e884708d535f0b99ff83ec1c0d",
"index": 8319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n 'patmalcolm91@gmail.com', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-3": "<mask token>\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n 'patmalcolm91@gmail.com', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-4": "from setuptools import setup\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nsetup(name='SumoSound', packages=['SumoSound'], version='1.0.2', license=\n 'MIT', description=\n 'A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description, long_description_content_type=\n 'text/markdown', author='Patrick Malcolm', author_email=\n 'patmalcolm91@gmail.com', url=\n 'https://github.com/patmalcolm91/SumoSound', download_url=\n 'https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound',\n 'OpenAL', 'traffic'], install_requires=['pyopenal'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], package_data={'SumoSound': [\n 'stock_sounds/*.wav']})\n",
"step-5": "from setuptools import setup\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='SumoSound',\n packages=['SumoSound'],\n version='1.0.2',\n license='MIT',\n description='A python library to add 3D sound to a Sumo traffic simulation.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Patrick Malcolm',\n author_email='patmalcolm91@gmail.com',\n url='https://github.com/patmalcolm91/SumoSound',\n download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',\n keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],\n install_requires=[\n 'pyopenal',\n ],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n package_data={'SumoSound': ['stock_sounds/*.wav']}\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
l1: list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print('The original list: ', l1)
<|reserved_special_token_0|>
while i < len(l1):
l1[i] = l1[i] + 100
i = i + 1
print('The modified new list is: ', l1)
<|reserved_special_token_0|>
while True:
guess = int(input('Enter an integer : '))
if guess == number:
print('Congratulations, you guessed it.')
break
elif guess < number:
print('No, it is a little higher than that.')
continue
else:
print('No, it is a little lower than that.')
continue
print('Done')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
l1: list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print('The original list: ', l1)
i = 0
while i < len(l1):
l1[i] = l1[i] + 100
i = i + 1
print('The modified new list is: ', l1)
number = 23
while True:
guess = int(input('Enter an integer : '))
if guess == number:
print('Congratulations, you guessed it.')
break
elif guess < number:
print('No, it is a little higher than that.')
continue
else:
print('No, it is a little lower than that.')
continue
print('Done')
<|reserved_special_token_1|>
'''
The while statement allows you to repeatedly execute a block of statements as long as a condition is true.
A while statement is an example of what is called a looping statement. A while statement can have an optional else clause.
'''
#Modifying the values using while loop in a list
l1: list = [1,2,3,4,5,6,7,8,9,10]
print("The original list: " , l1)
i=0
while (i < len(l1)):
l1[i] = l1[i] + 100
i=i+1
print("The modified new list is: ", l1)
#Guessing game using while-else loop
number = 23
while True:
guess = int(input('Enter an integer : ')) #input statement to enter data from console
if guess == number:
print('Congratulations, you guessed it.')
break
elif guess < number:
print('No, it is a little higher than that.')
continue
else:
print('No, it is a little lower than that.')
continue
# Do anything else you want to do here
print('Done')
|
flexible
|
{
"blob_id": "6a3fd3323ed8792853afdf5af76161f3e20d4896",
"index": 4443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nl1: list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint('The original list: ', l1)\n<mask token>\nwhile i < len(l1):\n l1[i] = l1[i] + 100\n i = i + 1\nprint('The modified new list is: ', l1)\n<mask token>\nwhile True:\n guess = int(input('Enter an integer : '))\n if guess == number:\n print('Congratulations, you guessed it.')\n break\n elif guess < number:\n print('No, it is a little higher than that.')\n continue\n else:\n print('No, it is a little lower than that.')\n continue\nprint('Done')\n",
"step-3": "<mask token>\nl1: list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint('The original list: ', l1)\ni = 0\nwhile i < len(l1):\n l1[i] = l1[i] + 100\n i = i + 1\nprint('The modified new list is: ', l1)\nnumber = 23\nwhile True:\n guess = int(input('Enter an integer : '))\n if guess == number:\n print('Congratulations, you guessed it.')\n break\n elif guess < number:\n print('No, it is a little higher than that.')\n continue\n else:\n print('No, it is a little lower than that.')\n continue\nprint('Done')\n",
"step-4": "'''\nThe while statement allows you to repeatedly execute a block of statements as long as a condition is true.\nA while statement is an example of what is called a looping statement. A while statement can have an optional else clause.\n'''\n\n#Modifying the values using while loop in a list\nl1: list = [1,2,3,4,5,6,7,8,9,10]\nprint(\"The original list: \" , l1)\n\ni=0\nwhile (i < len(l1)):\n l1[i] = l1[i] + 100\n i=i+1\nprint(\"The modified new list is: \", l1)\n\n#Guessing game using while-else loop\nnumber = 23\n\nwhile True:\n guess = int(input('Enter an integer : ')) #input statement to enter data from console\n if guess == number:\n print('Congratulations, you guessed it.')\n break\n elif guess < number:\n print('No, it is a little higher than that.')\n continue\n else:\n print('No, it is a little lower than that.')\n continue\n\n# Do anything else you want to do here\nprint('Done')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
if __eq__ is None:
__eq__ = operator.__eq__
idx = -1
for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
if not __eq__(a, b):
return idx
else:
return idx + 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
if __eq__ is None:
__eq__ = operator.__eq__
idx = -1
for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
if not __eq__(a, b):
return idx
else:
return idx + 1
assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1, 3], [1, 2]) == 1
<|reserved_special_token_1|>
__all__ = """
calc_common_prefix_length
""".split()
<|reserved_special_token_0|>
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
if __eq__ is None:
__eq__ = operator.__eq__
idx = -1
for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
if not __eq__(a, b):
return idx
else:
return idx + 1
assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1, 3], [1, 2]) == 1
<|reserved_special_token_1|>
__all__ = """
calc_common_prefix_length
""".split()
import operator
import itertools
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
if __eq__ is None:
__eq__ = operator.__eq__
idx = -1
for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
if not __eq__(a, b):
return idx
else:
return idx + 1
assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1, 3], [1, 2]) == 1
<|reserved_special_token_1|>
__all__ = '''
calc_common_prefix_length
'''.split()
import operator
import itertools
def calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):
if __eq__ is None:
__eq__ = operator.__eq__
idx = -1
for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):
if not __eq__(a, b):
return idx
else:
return idx+1
assert calc_common_prefix_length([], []) == 0
assert calc_common_prefix_length([], [1]) == 0
assert calc_common_prefix_length([1], [1]) == 1
assert calc_common_prefix_length([1,3], [1,2]) == 1
|
flexible
|
{
"blob_id": "2b73c4e07bba7ed5c89a31ebd45655eaa85dcdcc",
"index": 2689,
"step-1": "<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n",
"step-3": "__all__ = \"\"\"\n calc_common_prefix_length\n \"\"\".split()\n<mask token>\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n",
"step-4": "__all__ = \"\"\"\n calc_common_prefix_length\n \"\"\".split()\nimport operator\nimport itertools\n\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx + 1\n\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1, 3], [1, 2]) == 1\n",
"step-5": "\n__all__ = '''\n calc_common_prefix_length\n '''.split()\nimport operator\nimport itertools\n\ndef calc_common_prefix_length(lhs_iterable, rhs_iterable, /, *, __eq__=None):\n if __eq__ is None:\n __eq__ = operator.__eq__\n\n idx = -1\n for a, b, idx in zip(lhs_iterable, rhs_iterable, itertools.count(0)):\n if not __eq__(a, b):\n return idx\n else:\n return idx+1\n\nassert calc_common_prefix_length([], []) == 0\nassert calc_common_prefix_length([], [1]) == 0\nassert calc_common_prefix_length([1], [1]) == 1\nassert calc_common_prefix_length([1,3], [1,2]) == 1\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
# -*- coding: latin-1 -*-
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html', titre="Ludovic DELSOL - Portfolio")
@app.route('/etude')
def etude():
return render_template('etude.html', titre="Portfolio Ludovic DELSOL - Etude")
@app.route('/experience')
def experience():
return render_template('experience.html', titre="Portfolio Ludovic DELSOL - Experiences Pros")
@app.route('/competence')
def compentence():
return render_template('compentence.html', titre="Portfolio Ludovic DELSOL - Compétences")
@app.route('/projet')
def project():
return render_template('projet.html', titre="Portfolio Ludovic DELSOL - Projets")
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "c7037b6a576374f211580b304f8447349bbbbea3",
"index": 9583,
"step-1": "<mask token>\n\n\n@app.route('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\n@app.route('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\n@app.route('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\n@app.route('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', titre='Ludovic DELSOL - Portfolio')\n\n\n@app.route('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\n@app.route('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\n@app.route('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\n@app.route('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', titre='Ludovic DELSOL - Portfolio')\n\n\n@app.route('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\n@app.route('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\n@app.route('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\n@app.route('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', titre='Ludovic DELSOL - Portfolio')\n\n\n@app.route('/etude')\ndef etude():\n return render_template('etude.html', titre=\n 'Portfolio Ludovic DELSOL - Etude')\n\n\n@app.route('/experience')\ndef experience():\n return render_template('experience.html', titre=\n 'Portfolio Ludovic DELSOL - Experiences Pros')\n\n\n@app.route('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\n 'Portfolio Ludovic DELSOL - Compétences')\n\n\n@app.route('/projet')\ndef project():\n return render_template('projet.html', titre=\n 'Portfolio Ludovic DELSOL - Projets')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "#!/usr/bin/python\n# -*- coding: latin-1 -*-\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', titre=\"Ludovic DELSOL - Portfolio\")\n\n@app.route('/etude')\ndef etude():\n return render_template('etude.html', titre=\"Portfolio Ludovic DELSOL - Etude\")\n\n@app.route('/experience')\ndef experience():\n return render_template('experience.html', titre=\"Portfolio Ludovic DELSOL - Experiences Pros\")\n\n@app.route('/competence')\ndef compentence():\n return render_template('compentence.html', titre=\"Portfolio Ludovic DELSOL - Compétences\")\n\n@app.route('/projet')\ndef project():\n return render_template('projet.html', titre=\"Portfolio Ludovic DELSOL - Projets\")\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def scoring(msg, space, score_func, min_score=0.5, **score_func_params):
""" Run the score function over the given message and over a parametric
value x. Return all the values x as a FuzzySet (guess)
which scores is greather than the minimum score.
The parametric space <space> can is defined as:
- a range object
- or any other iterable of the parametric values x
For each possible x, score each using <score_func> and
drop anyone with a score of <min_score> or less.
Extra parameters can be passed to the <score_func> using
<score_func_params>.
Return a FuzzySet with the x values.
"""
assert 0.0 <= min_score <= 1.0
are_bytes_or_fail(msg, 'msg')
params = score_func_params
lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),
pr='tuple', min_membership=min_score)
return lengths
<|reserved_special_token_1|>
from .score_funcs import *
from cryptonita.fuzzy_set import FuzzySet
from cryptonita.helpers import are_bytes_or_fail
def scoring(msg, space, score_func, min_score=0.5, **score_func_params):
""" Run the score function over the given message and over a parametric
value x. Return all the values x as a FuzzySet (guess)
which scores is greather than the minimum score.
The parametric space <space> can is defined as:
- a range object
- or any other iterable of the parametric values x
For each possible x, score each using <score_func> and
drop anyone with a score of <min_score> or less.
Extra parameters can be passed to the <score_func> using
<score_func_params>.
Return a FuzzySet with the x values.
"""
assert 0.0 <= min_score <= 1.0
are_bytes_or_fail(msg, 'msg')
params = score_func_params
lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),
pr='tuple', min_membership=min_score)
return lengths
<|reserved_special_token_1|>
from .score_funcs import *
from cryptonita.fuzzy_set import FuzzySet
from cryptonita.helpers import are_bytes_or_fail
def scoring(msg, space, score_func, min_score=0.5, **score_func_params):
''' Run the score function over the given message and over a parametric
value x. Return all the values x as a FuzzySet (guess)
which scores is greather than the minimum score.
The parametric space <space> can is defined as:
- a range object
- or any other iterable of the parametric values x
For each possible x, score each using <score_func> and
drop anyone with a score of <min_score> or less.
Extra parameters can be passed to the <score_func> using
<score_func_params>.
Return a FuzzySet with the x values.
'''
assert 0.0 <= min_score <= 1.0
are_bytes_or_fail(msg, 'msg')
params = score_func_params
lengths = FuzzySet(
((x, score_func(msg, x, **params)) for x in space),
pr='tuple',
min_membership=min_score
)
return lengths
|
flexible
|
{
"blob_id": "99048ddb3f42382c8b8b435d832a45011a031cf1",
"index": 8537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n \"\"\" Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n \"\"\"\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n params = score_func_params\n lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),\n pr='tuple', min_membership=min_score)\n return lengths\n",
"step-3": "from .score_funcs import *\nfrom cryptonita.fuzzy_set import FuzzySet\nfrom cryptonita.helpers import are_bytes_or_fail\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n \"\"\" Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n \"\"\"\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n params = score_func_params\n lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),\n pr='tuple', min_membership=min_score)\n return lengths\n",
"step-4": "from .score_funcs import *\n\nfrom cryptonita.fuzzy_set import FuzzySet\nfrom cryptonita.helpers import are_bytes_or_fail\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n ''' Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n '''\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n\n params = score_func_params\n lengths = FuzzySet(\n ((x, score_func(msg, x, **params)) for x in space),\n pr='tuple',\n min_membership=min_score\n )\n return lengths\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python3
"""Locked class module"""
class LockedClass:
"""test class with locked dynamic attruibute creation
"""
__slots__ = 'first_name'
|
normal
|
{
"blob_id": "d90a4b00d97cecf3612915a72e48a363c5dcc97b",
"index": 5006,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LockedClass:\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LockedClass:\n <mask token>\n __slots__ = 'first_name'\n",
"step-4": "<mask token>\n\n\nclass LockedClass:\n \"\"\"test class with locked dynamic attruibute creation\n \"\"\"\n __slots__ = 'first_name'\n",
"step-5": "#!/usr/bin/python3\n\"\"\"Locked class module\"\"\"\n\n\nclass LockedClass:\n \"\"\"test class with locked dynamic attruibute creation\n \"\"\"\n __slots__ = 'first_name'\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def downgrade():
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_column('user', 'money')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upgrade():
op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))
op.create_unique_constraint(None, 'user', ['password'])
op.create_unique_constraint(None, 'user', ['email'])
def downgrade():
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_column('user', 'money')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = '0bb5933fe69f'
down_revision = '09c6fdb3cf81'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))
op.create_unique_constraint(None, 'user', ['password'])
op.create_unique_constraint(None, 'user', ['email'])
def downgrade():
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_column('user', 'money')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from alembic import op
import sqlalchemy as sa
revision = '0bb5933fe69f'
down_revision = '09c6fdb3cf81'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))
op.create_unique_constraint(None, 'user', ['password'])
op.create_unique_constraint(None, 'user', ['email'])
def downgrade():
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_column('user', 'money')
<|reserved_special_token_1|>
"""empty message
Revision ID: 0bb5933fe69f
Revises: 09c6fdb3cf81
Create Date: 2021-03-11 16:48:06.771046
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0bb5933fe69f'
down_revision = '09c6fdb3cf81'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))
op.create_unique_constraint(None, 'user', ['password'])
op.create_unique_constraint(None, 'user', ['email'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_column('user', 'money')
# ### end Alembic commands ###
|
flexible
|
{
"blob_id": "f727c0551f20fb0dc72b4d81b7b3ed8ce9b1b6f4",
"index": 2072,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-3": "<mask token>\nrevision = '0bb5933fe69f'\ndown_revision = '09c6fdb3cf81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '0bb5933fe69f'\ndown_revision = '09c6fdb3cf81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 0bb5933fe69f\nRevises: 09c6fdb3cf81\nCreate Date: 2021-03-11 16:48:06.771046\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0bb5933fe69f'\ndown_revision = '09c6fdb3cf81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n # ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while currentWeight > goalWeight:
endDate += datetime.timedelta(days=7)
currentWeight -= avgKgPerWeek
print(endDate, round(currentWeight, 2))
print(f'Start date: {startDate.month.no}, end date: {endDate} ')
print(
f'Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
currentWeight = 73
goalWeight = 67
avgKgPerWeek = 0.45
startDate = datetime.date.today()
endDate = startDate
while currentWeight > goalWeight:
endDate += datetime.timedelta(days=7)
currentWeight -= avgKgPerWeek
print(endDate, round(currentWeight, 2))
print(f'Start date: {startDate.month.no}, end date: {endDate} ')
print(
f'Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days'
)
<|reserved_special_token_1|>
import datetime
currentWeight = 73
goalWeight = 67
avgKgPerWeek = 0.45
startDate = datetime.date.today()
endDate = startDate
while currentWeight > goalWeight:
endDate += datetime.timedelta(days=7)
currentWeight -= avgKgPerWeek
print(endDate, round(currentWeight, 2))
print(f'Start date: {startDate.month.no}, end date: {endDate} ')
print(
f'Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days'
)
<|reserved_special_token_1|>
import datetime
# weightloss script
currentWeight = 73
goalWeight = 67
avgKgPerWeek = 0.45
startDate = datetime.date.today()
endDate = startDate
while currentWeight > goalWeight:
# adding 7 days to simulate a week passing
endDate += datetime.timedelta(days=7)
currentWeight -= avgKgPerWeek
print(endDate, round(currentWeight, 2))
print(f"Start date: {startDate.month.no}, end date: {endDate} ")
print(f"Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days")
|
flexible
|
{
"blob_id": "7fb568880c40895870a0c541d9a88a8070a79e5b",
"index": 5762,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile currentWeight > goalWeight:\n endDate += datetime.timedelta(days=7)\n currentWeight -= avgKgPerWeek\n print(endDate, round(currentWeight, 2))\nprint(f'Start date: {startDate.month.no}, end date: {endDate} ')\nprint(\n f'Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days'\n )\n",
"step-3": "<mask token>\ncurrentWeight = 73\ngoalWeight = 67\navgKgPerWeek = 0.45\nstartDate = datetime.date.today()\nendDate = startDate\nwhile currentWeight > goalWeight:\n endDate += datetime.timedelta(days=7)\n currentWeight -= avgKgPerWeek\n print(endDate, round(currentWeight, 2))\nprint(f'Start date: {startDate.month.no}, end date: {endDate} ')\nprint(\n f'Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days'\n )\n",
"step-4": "import datetime\ncurrentWeight = 73\ngoalWeight = 67\navgKgPerWeek = 0.45\nstartDate = datetime.date.today()\nendDate = startDate\nwhile currentWeight > goalWeight:\n endDate += datetime.timedelta(days=7)\n currentWeight -= avgKgPerWeek\n print(endDate, round(currentWeight, 2))\nprint(f'Start date: {startDate.month.no}, end date: {endDate} ')\nprint(\n f'Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days'\n )\n",
"step-5": "import datetime\n\n# weightloss script\ncurrentWeight = 73\ngoalWeight = 67\navgKgPerWeek = 0.45\n\nstartDate = datetime.date.today()\nendDate = startDate\n\nwhile currentWeight > goalWeight:\n\n # adding 7 days to simulate a week passing\n endDate += datetime.timedelta(days=7)\n currentWeight -= avgKgPerWeek\n \n print(endDate, round(currentWeight, 2))\n\n\nprint(f\"Start date: {startDate.month.no}, end date: {endDate} \")\nprint(f\"Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from .models import Predictions
@admin.register(Predictions)
class PredictionsAdmin(admin.ModelAdmin):
pass
|
normal
|
{
"blob_id": "bab78e8a88f9a26cc13fe0c301f82880cee2b680",
"index": 965,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@admin.register(Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"step-3": "from django.contrib import admin\nfrom .models import Predictions\n\n\n@admin.register(Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import time, json, glob, os, enum
import serial
import threading
import responder
# Variables to adjust for your environment
isMCUConnected = True # whether the microcontroller is connected to the USB port
SERIALPATH_RASPI = '/dev/ttyACM0' # serial port on the Raspberry Pi
SERIALPATH_WIN = 'COM16' # serial port on Windows
# Constants
PIN_SERVO1 = 12 # GPIO12 PWM0 Pin
PIN_SERVO2 = 13 # GPIO13 PWM1 Pin
PIN_LED = 16 # GPIO25 LED Pin
SERVO_MIN = 115000 # minimum servo duty
SERVO_MAX = 26000 # maximum servo duty
SPEED_MAX = 30 # maximum speed [km/h]
IMM_MAX = 7.5 # maximum current (swings both positive and negative) [A]
RXBUF0 = open("rxdata.json", "r").read().replace("\n","") # dummy received data read when serial communication is not used
class Meters():
def __init__(self):
        self.pi = None # pigpio object
        # set up pigpio
        if os.name == 'posix': # import pigpio when running on a Raspberry Pi
import pigpio
self.pi = pigpio.pi()
self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)
self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)
self.pi.set_mode(PIN_LED, pigpio.OUTPUT)
def indicate(self, kmh=None, amp=None, led=None):
if self.pi:
if kmh != None:
kmh = SPEED_MAX if (kmh > SPEED_MAX) else kmh # constrain upbound and lowbound
kmh = 0 if (kmh < 0) else kmh
                self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh/SPEED_MAX * (SERVO_MAX - SERVO_MIN))) # speedometer
if amp != None:
amp = IMM_MAX if (amp > IMM_MAX) else amp
amp = -IMM_MAX if (amp < -IMM_MAX) else amp
                self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5*(1 + amp/IMM_MAX) * (SERVO_MAX - SERVO_MIN))) # ammeter
if led != None:
self.pi.write(PIN_LED, led)
class SerialCom():
def __init__(self, meterObj=None):
        self.ser = None # serial communication object
        self.rxdata = {} # dict holding received data; external code reads results from here
        self.flagrx = True # serial reception runs while this is True
        self.t1 = None # threading object that performs the serial reception
        self.METERS = meterObj # reference to the Meters object used to display the speed
        # open the serial port if the MCU is connected
print("[serialcom.__init__] open serial port")
if isMCUConnected:
try:
                # choose the serial port based on the OS
if os.name == 'posix':
portpath = SERIALPATH_RASPI
elif os.name == 'nt':
portpath = SERIALPATH_WIN
                # open the port
self.ser = serial.Serial(portpath, 115200, timeout=None)
            # if opening the port fails
except serial.serialutil.SerialException:
print("[serialcom.__init__] failed to open port")
self.rxdata = {"serialfailed":1}
else:
print("[serialcom.__init__] port wasn't opened because isMCUConnected==False.")
def recieve_loop(self):
        # infinite loop that receives from the serial port
if self.ser:
print("[serialcom.recieve_loop] start recieving")
            self.ser.readline() # discard the first read, which is incomplete
while self.flagrx:
rxbuf = self.ser.readline().decode('ascii','ignore')
print(rxbuf)
try:
                    self.rxdata = json.loads(rxbuf) # decode the JSON payload
                    self.rxdata['serialfailed'] = 0
                    if self.METERS: # show the values on the meters
self.METERS.indicate(self.rxdata['speed'], self.rxdata['Imm'], self.rxdata['invstate'])
except json.decoder.JSONDecodeError:
print("[serialcom.recieve_loop] when decoding, error has occured")
self.rxdata['serialfailed'] = 1
self.ser.close()
            # when the serial port is not open, use RXBUF0 as rxdata
else:
print("[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.")
self.rxdata = json.loads(RXBUF0)
self.rxdata['serialfailed'] = 0
while self.flagrx:
time.sleep(0.5)
print("[serialcom.recieve_loop] end recieving")
def recieve_start(self):
if not(self.t1):
self.flagrx = True
self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)
self.t1.start()
def recieve_end(self):
if self.t1:
self.flagrx = False
self.t1.join()
del self.t1
def send(self, txbuf):
if self.ser:
print(bytes(txbuf,"ascii"))
return self.ser.write(bytes(txbuf,"ascii"))
def main():
class Mode(enum.IntEnum):
DEMO = 0
EBIKE = 1
ASSIST = 2
    mode = Mode.DEMO # operating mode
    # create and initialize the meter and serial-communication instances
meters = Meters()
meters.indicate(0, 0, 0)
serialcom = SerialCom(meters)
serialcom.recieve_start()
    # set up the web server
api = responder.API()
@api.route("/reset")
def reset(req,resp):
serialcom.send("invoff\n")
@api.route("/info")
def get_info(req,resp):
resp.headers = {"Content-Type": "application/json; charset=utf-8"}
resp.media = serialcom.rxdata
@api.route("/cardata")
def get_cardata(req,resp):
text = open("static/cars/cardata.json", "r", encoding='utf-8').read()
resp.headers = {"Content-Type": "application/json; charset=utf-8"}
resp.text = text
@api.route("/command")
async def post_command(req,resp):
data = await req.media()
print(data)
if 'carno' in data:
serialcom.send("invoff\n")
time.sleep(0.5)
while serialcom.rxdata['invstate'] == 1:
time.sleep(0.1)
serialcom.send(f"carno={data['carno']}\n")
if 'mode' in data:
serialcom.send("invoff\n")
time.sleep(0.5)
while serialcom.rxdata['invstate'] == 1:
time.sleep(0.1)
serialcom.send(f"mode={data['mode']}\n")
if 'notch' in data:
if data['notch'] == 'P':
serialcom.send("P\n")
elif data['notch'] == 'N':
serialcom.send("N\n")
elif data['notch'] == 'B':
serialcom.send("B\n")
else:
serialcom.send(f"notch={data['notch']}\n")
if 'invoff' in data:
serialcom.send("invoff\n")
@api.route("/")
def hello_html(req,resp):
resp.html = api.template('index.html')
# web server start
    api.run(address='0.0.0.0', port=5042) # binding to 0.0.0.0 allows access from other machines
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "25532102cc36da139a22a61d226dff613f06ab31",
"index": 4714,
"step-1": "<mask token>\n\n\nclass Meters:\n <mask token>\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Meters:\n\n def __init__(self):\n self.pi = None\n if os.name == 'posix':\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\ndef main():\n\n\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n mode = Mode.DEMO\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n api = responder.API()\n\n @api.route('/reset')\n def reset(req, resp):\n serialcom.send('invoff\\n')\n\n @api.route('/info')\n def get_info(req, resp):\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.media = serialcom.rxdata\n\n @api.route('/cardata')\n def get_cardata(req, resp):\n text = open('static/cars/cardata.json', 'r', encoding='utf-8').read()\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.text = text\n\n @api.route('/command')\n async def post_command(req, resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n 
time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send('P\\n')\n elif data['notch'] == 'N':\n serialcom.send('N\\n')\n elif data['notch'] == 'B':\n serialcom.send('B\\n')\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send('invoff\\n')\n\n @api.route('/')\n def hello_html(req, resp):\n resp.html = api.template('index.html')\n api.run(address='0.0.0.0', port=5042)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nisMCUConnected = True\nSERIALPATH_RASPI = '/dev/ttyACM0'\nSERIALPATH_WIN = 'COM16'\nPIN_SERVO1 = 12\nPIN_SERVO2 = 13\nPIN_LED = 16\nSERVO_MIN = 115000\nSERVO_MAX = 26000\nSPEED_MAX = 30\nIMM_MAX = 7.5\nRXBUF0 = open('rxdata.json', 'r').read().replace('\\n', '')\n\n\nclass Meters:\n\n def __init__(self):\n self.pi = None\n if os.name == 'posix':\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\ndef main():\n\n\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n mode = Mode.DEMO\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n api = responder.API()\n\n @api.route('/reset')\n def reset(req, resp):\n serialcom.send('invoff\\n')\n\n @api.route('/info')\n def get_info(req, resp):\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.media = serialcom.rxdata\n\n @api.route('/cardata')\n def get_cardata(req, resp):\n text = open('static/cars/cardata.json', 'r', encoding='utf-8').read()\n resp.headers = {'Content-Type': 'application/json; 
charset=utf-8'}\n resp.text = text\n\n @api.route('/command')\n async def post_command(req, resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send('P\\n')\n elif data['notch'] == 'N':\n serialcom.send('N\\n')\n elif data['notch'] == 'B':\n serialcom.send('B\\n')\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send('invoff\\n')\n\n @api.route('/')\n def hello_html(req, resp):\n resp.html = api.template('index.html')\n api.run(address='0.0.0.0', port=5042)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import time, json, glob, os, enum\nimport serial\nimport threading\nimport responder\nisMCUConnected = True\nSERIALPATH_RASPI = '/dev/ttyACM0'\nSERIALPATH_WIN = 'COM16'\nPIN_SERVO1 = 12\nPIN_SERVO2 = 13\nPIN_LED = 16\nSERVO_MIN = 115000\nSERVO_MAX = 26000\nSPEED_MAX = 30\nIMM_MAX = 7.5\nRXBUF0 = open('rxdata.json', 'r').read().replace('\\n', '')\n\n\nclass Meters:\n\n def __init__(self):\n self.pi = None\n if os.name == 'posix':\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if kmh > SPEED_MAX else kmh\n kmh = 0 if kmh < 0 else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh /\n SPEED_MAX * (SERVO_MAX - SERVO_MIN)))\n if amp != None:\n amp = IMM_MAX if amp > IMM_MAX else amp\n amp = -IMM_MAX if amp < -IMM_MAX else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5 *\n (1 + amp / IMM_MAX) * (SERVO_MAX - SERVO_MIN)))\n if led != None:\n self.pi.write(PIN_LED, led)\n\n\nclass SerialCom:\n\n def __init__(self, meterObj=None):\n self.ser = None\n self.rxdata = {}\n self.flagrx = True\n self.t1 = None\n self.METERS = meterObj\n print('[serialcom.__init__] open serial port')\n if isMCUConnected:\n try:\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n except serial.serialutil.SerialException:\n print('[serialcom.__init__] failed to open port')\n self.rxdata = {'serialfailed': 1}\n else:\n print(\n \"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\"\n )\n\n def recieve_loop(self):\n if self.ser:\n print('[serialcom.recieve_loop] start recieving')\n self.ser.readline()\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii', 'ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf)\n self.rxdata['serialfailed'] = 0\n if self.METERS:\n self.METERS.indicate(self.rxdata['speed'], self.\n rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\n '[serialcom.recieve_loop] when decoding, error has occured'\n )\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n else:\n print(\n '[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.'\n )\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n print('[serialcom.recieve_loop] end recieving')\n\n def recieve_start(self):\n if not self.t1:\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n\n def send(self, txbuf):\n if self.ser:\n print(bytes(txbuf, 'ascii'))\n return self.ser.write(bytes(txbuf, 'ascii'))\n\n\ndef main():\n\n\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n mode = Mode.DEMO\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n api = responder.API()\n\n @api.route('/reset')\n def reset(req, resp):\n serialcom.send('invoff\\n')\n\n @api.route('/info')\n def get_info(req, resp):\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.media = serialcom.rxdata\n\n @api.route('/cardata')\n def get_cardata(req, resp):\n text = open('static/cars/cardata.json', 'r', 
encoding='utf-8').read()\n resp.headers = {'Content-Type': 'application/json; charset=utf-8'}\n resp.text = text\n\n @api.route('/command')\n async def post_command(req, resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send('invoff\\n')\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send('P\\n')\n elif data['notch'] == 'N':\n serialcom.send('N\\n')\n elif data['notch'] == 'B':\n serialcom.send('B\\n')\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send('invoff\\n')\n\n @api.route('/')\n def hello_html(req, resp):\n resp.html = api.template('index.html')\n api.run(address='0.0.0.0', port=5042)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import time, json, glob, os, enum\nimport serial\nimport threading\nimport responder\n\n# 環境によって書き換える変数\nisMCUConnected = True # マイコンがUSBポートに接続されているか\nSERIALPATH_RASPI = '/dev/ttyACM0' # ラズパイのシリアルポート\nSERIALPATH_WIN = 'COM16' # Windowsのシリアルポート\n\n# 各種定数\nPIN_SERVO1 = 12 # GPIO12 PWM0 Pin\nPIN_SERVO2 = 13 # GPIO13 PWM1 Pin\nPIN_LED = 16 # GPIO25 LED Pin\nSERVO_MIN = 115000 # サーボの最小duty\nSERVO_MAX = 26000 # サーボの最大duty\nSPEED_MAX = 30 # 速度の最大値 [km/h]\nIMM_MAX = 7.5 # 電流の最大値(プラスとマイナス両方に振れる) [A]\nRXBUF0 = open(\"rxdata.json\", \"r\").read().replace(\"\\n\",\"\") # シリアル通信しないときにダミーで読み込む受信結果\n\nclass Meters():\n def __init__(self):\n self.pi = None # pigpioオブジェクト\n # pigpioのセットアップ\n if os.name == 'posix': # ラズパイで動かした場合にはpigpioをインポート\n import pigpio\n self.pi = pigpio.pi()\n self.pi.set_mode(PIN_SERVO1, pigpio.OUTPUT)\n self.pi.set_mode(PIN_SERVO2, pigpio.OUTPUT)\n self.pi.set_mode(PIN_LED, pigpio.OUTPUT)\n\n def indicate(self, kmh=None, amp=None, led=None):\n if self.pi:\n if kmh != None:\n kmh = SPEED_MAX if (kmh > SPEED_MAX) else kmh # constrain upbound and lowbound\n kmh = 0 if (kmh < 0) else kmh\n self.pi.hardware_PWM(PIN_SERVO1, 50, int(SERVO_MIN + kmh/SPEED_MAX * (SERVO_MAX - SERVO_MIN))) # 速度計\n if amp != None:\n amp = IMM_MAX if (amp > IMM_MAX) else amp\n amp = -IMM_MAX if (amp < -IMM_MAX) else amp\n self.pi.hardware_PWM(PIN_SERVO2, 50, int(SERVO_MIN + 0.5*(1 + amp/IMM_MAX) * (SERVO_MAX - SERVO_MIN))) # 電流計\n if led != None:\n self.pi.write(PIN_LED, led)\n\nclass SerialCom():\n def __init__(self, meterObj=None):\n self.ser = None # シリアル通信オブジェクト\n self.rxdata = {} # 受信したデータを入れておく辞書型変数。外部からこれにアクセスすることでデータを取り出す\n self.flagrx = True # Trueの間シリアル通信を実行\n self.t1 = None # シリアルの受信を行うThreadingオブジェクト\n self.METERS = meterObj # 速度を表示するMetersオブジェクトへの参照をセット # Metersオブジェクトへの参照\n\n # MCUが接続されていればシリアルポートをオープン\n print(\"[serialcom.__init__] open serial port\")\n if isMCUConnected:\n try:\n # OSによってポートを切り替え\n if os.name == 'posix':\n portpath = SERIALPATH_RASPI\n elif os.name == 'nt':\n portpath = SERIALPATH_WIN\n\n # ポートを開く\n self.ser = serial.Serial(portpath, 115200, timeout=None)\n\n # ポートオープン失敗時\n except serial.serialutil.SerialException:\n print(\"[serialcom.__init__] failed to open port\")\n self.rxdata = {\"serialfailed\":1}\n \n else:\n print(\"[serialcom.__init__] port wasn't opened because isMCUConnected==False.\")\n \n def recieve_loop(self):\n # シリアルポートから受信を行う無限ループ\n if self.ser:\n print(\"[serialcom.recieve_loop] start recieving\")\n self.ser.readline() # 1回目は不完全なデータなので空読み\n while self.flagrx:\n rxbuf = self.ser.readline().decode('ascii','ignore')\n print(rxbuf)\n try:\n self.rxdata = json.loads(rxbuf) # JSON形式へデコード\n self.rxdata['serialfailed'] = 0\n if self.METERS: # メーターに表示\n self.METERS.indicate(self.rxdata['speed'], self.rxdata['Imm'], self.rxdata['invstate'])\n except json.decoder.JSONDecodeError:\n print(\"[serialcom.recieve_loop] when decoding, error has occured\")\n self.rxdata['serialfailed'] = 1\n self.ser.close()\n\n # シリアルポートが開いていないときは、 rxdataとしてRXBUF0を代入する\n else:\n print(\"[serialcom.recieve_loop] Because MCU is not connected, RXBUF0 is set to rxdata.\")\n self.rxdata = json.loads(RXBUF0)\n self.rxdata['serialfailed'] = 0\n while self.flagrx:\n time.sleep(0.5)\n \n print(\"[serialcom.recieve_loop] end recieving\")\n\n def recieve_start(self):\n if not(self.t1):\n self.flagrx = True\n self.t1 = threading.Thread(target=self.recieve_loop, daemon=True)\n self.t1.start()\n\n def recieve_end(self):\n if self.t1:\n self.flagrx = False\n self.t1.join()\n del self.t1\n \n def 
send(self, txbuf):\n if self.ser:\n print(bytes(txbuf,\"ascii\"))\n return self.ser.write(bytes(txbuf,\"ascii\"))\n \ndef main():\n class Mode(enum.IntEnum):\n DEMO = 0\n EBIKE = 1\n ASSIST = 2\n \n mode = Mode.DEMO # 動作モード\n \n # メーターとシリアル通信のインスタンスを生成、初期化\n meters = Meters()\n meters.indicate(0, 0, 0)\n serialcom = SerialCom(meters)\n serialcom.recieve_start()\n\n # サーバを立てる\n api = responder.API()\n\n @api.route(\"/reset\")\n def reset(req,resp):\n serialcom.send(\"invoff\\n\")\n\n @api.route(\"/info\")\n def get_info(req,resp):\n resp.headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n resp.media = serialcom.rxdata\n\n @api.route(\"/cardata\")\n def get_cardata(req,resp):\n text = open(\"static/cars/cardata.json\", \"r\", encoding='utf-8').read()\n resp.headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n resp.text = text\n\n @api.route(\"/command\")\n async def post_command(req,resp):\n data = await req.media()\n print(data)\n if 'carno' in data:\n serialcom.send(\"invoff\\n\")\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"carno={data['carno']}\\n\")\n if 'mode' in data:\n serialcom.send(\"invoff\\n\")\n time.sleep(0.5)\n while serialcom.rxdata['invstate'] == 1:\n time.sleep(0.1)\n serialcom.send(f\"mode={data['mode']}\\n\")\n if 'notch' in data:\n if data['notch'] == 'P':\n serialcom.send(\"P\\n\")\n elif data['notch'] == 'N':\n serialcom.send(\"N\\n\")\n elif data['notch'] == 'B':\n serialcom.send(\"B\\n\")\n else:\n serialcom.send(f\"notch={data['notch']}\\n\")\n if 'invoff' in data:\n serialcom.send(\"invoff\\n\")\n\n @api.route(\"/\")\n def hello_html(req,resp):\n resp.html = api.template('index.html')\n\n # web server start\n api.run(address='0.0.0.0', port=5042) # 0.0.0.0にすると外部からアクセスできる\n \n \nif __name__ == '__main__':\n main()\n",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(file_name, 'r') as f:
stop = 1
while stop != 0:
line = f.readline()
if len(line) < 1:
break
tot += float(line)
print(tot)
<|reserved_special_token_1|>
file_name = '013_largeSum_data'
tot = 0
with open(file_name, 'r') as f:
stop = 1
while stop != 0:
line = f.readline()
if len(line) < 1:
break
tot += float(line)
print(tot)
<|reserved_special_token_1|>
#!/usr/bin/python3
###################################################
### Euler project
### zdrassvouitie @ 10/2016
###################################################
file_name = '013_largeSum_data'
tot = 0
with open(file_name, "r") as f:
stop = 1
while stop != 0:
line = f.readline()
if len(line) < 1:
break
tot += float(line)
print(tot)
|
flexible
|
{
"blob_id": "bcdf1c03d996520f3d4d8d12ec4ef34ea63ef3cf",
"index": 3936,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(file_name, 'r') as f:\n stop = 1\n while stop != 0:\n line = f.readline()\n if len(line) < 1:\n break\n tot += float(line)\nprint(tot)\n",
"step-3": "file_name = '013_largeSum_data'\ntot = 0\nwith open(file_name, 'r') as f:\n stop = 1\n while stop != 0:\n line = f.readline()\n if len(line) < 1:\n break\n tot += float(line)\nprint(tot)\n",
"step-4": "#!/usr/bin/python3\n\n###################################################\n### Euler project\n### zdrassvouitie @ 10/2016\n###################################################\n\nfile_name = '013_largeSum_data'\ntot = 0\nwith open(file_name, \"r\") as f:\n stop = 1\n while stop != 0:\n line = f.readline()\n if len(line) < 1:\n break\n tot += float(line)\n\nprint(tot)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class LoginPage(BasePage, LoginPageLocators):
def __init__(self, driver=None):
super(LoginPage, self).__init__(driver=driver)
self.identifier = self.IDENTIFIER
<|reserved_special_token_0|>
def get_error_messages(self):
invalid_user = self.get_text(self.USERNAME_MISSING_DIV)
invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)
if not (invalid_pass or invalid_user):
return False
return invalid_pass, invalid_user
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginPage(BasePage, LoginPageLocators):
def __init__(self, driver=None):
super(LoginPage, self).__init__(driver=driver)
self.identifier = self.IDENTIFIER
<|reserved_special_token_0|>
def get_error_messages(self):
invalid_user = self.get_text(self.USERNAME_MISSING_DIV)
invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)
if not (invalid_pass or invalid_user):
return False
return invalid_pass, invalid_user
def forgot_password(self):
if not self.click(self.FORGOT_PASSWORD_LINK):
return False
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginPage(BasePage, LoginPageLocators):
def __init__(self, driver=None):
super(LoginPage, self).__init__(driver=driver)
self.identifier = self.IDENTIFIER
def login(self, email=None, password=None, remember_me=False):
self.navigate()
if not self.wait_for_page_to_load():
return False
if not email:
email = self.std.login_user
if not password:
password = self.std.login_password
if not self.set_text(self.USERNAME_INPUT, email):
return False
if not self.set_text(self.PASSWORD_INPUT, password):
return False
if remember_me:
if not self.click(self.REMEMBER_CHECKBOX):
return False
if not self.click(self.SIGN_IN_BTN):
return False
return True
def get_error_messages(self):
invalid_user = self.get_text(self.USERNAME_MISSING_DIV)
invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)
if not (invalid_pass or invalid_user):
return False
return invalid_pass, invalid_user
def forgot_password(self):
if not self.click(self.FORGOT_PASSWORD_LINK):
return False
return True
<|reserved_special_token_1|>
from ui.pages import BasePage
from ui.locators.login_page_locators import LoginPageLocators
class LoginPage(BasePage, LoginPageLocators):
def __init__(self, driver=None):
super(LoginPage, self).__init__(driver=driver)
self.identifier = self.IDENTIFIER
def login(self, email=None, password=None, remember_me=False):
self.navigate()
if not self.wait_for_page_to_load():
return False
if not email:
email = self.std.login_user
if not password:
password = self.std.login_password
if not self.set_text(self.USERNAME_INPUT, email):
return False
if not self.set_text(self.PASSWORD_INPUT, password):
return False
if remember_me:
if not self.click(self.REMEMBER_CHECKBOX):
return False
if not self.click(self.SIGN_IN_BTN):
return False
return True
def get_error_messages(self):
invalid_user = self.get_text(self.USERNAME_MISSING_DIV)
invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)
if not (invalid_pass or invalid_user):
return False
return invalid_pass, invalid_user
def forgot_password(self):
if not self.click(self.FORGOT_PASSWORD_LINK):
return False
return True
|
flexible
|
{
"blob_id": "c1bcce809aa073ecd6e64dfa65ead9bd48aee3ff",
"index": 7406,
"step-1": "<mask token>\n\n\nclass LoginPage(BasePage, LoginPageLocators):\n\n def __init__(self, driver=None):\n super(LoginPage, self).__init__(driver=driver)\n self.identifier = self.IDENTIFIER\n <mask token>\n\n def get_error_messages(self):\n invalid_user = self.get_text(self.USERNAME_MISSING_DIV)\n invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)\n if not (invalid_pass or invalid_user):\n return False\n return invalid_pass, invalid_user\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LoginPage(BasePage, LoginPageLocators):\n\n def __init__(self, driver=None):\n super(LoginPage, self).__init__(driver=driver)\n self.identifier = self.IDENTIFIER\n <mask token>\n\n def get_error_messages(self):\n invalid_user = self.get_text(self.USERNAME_MISSING_DIV)\n invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)\n if not (invalid_pass or invalid_user):\n return False\n return invalid_pass, invalid_user\n\n def forgot_password(self):\n if not self.click(self.FORGOT_PASSWORD_LINK):\n return False\n return True\n",
"step-3": "<mask token>\n\n\nclass LoginPage(BasePage, LoginPageLocators):\n\n def __init__(self, driver=None):\n super(LoginPage, self).__init__(driver=driver)\n self.identifier = self.IDENTIFIER\n\n def login(self, email=None, password=None, remember_me=False):\n self.navigate()\n if not self.wait_for_page_to_load():\n return False\n if not email:\n email = self.std.login_user\n if not password:\n password = self.std.login_password\n if not self.set_text(self.USERNAME_INPUT, email):\n return False\n if not self.set_text(self.PASSWORD_INPUT, password):\n return False\n if remember_me:\n if not self.click(self.REMEMBER_CHECKBOX):\n return False\n if not self.click(self.SIGN_IN_BTN):\n return False\n return True\n\n def get_error_messages(self):\n invalid_user = self.get_text(self.USERNAME_MISSING_DIV)\n invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)\n if not (invalid_pass or invalid_user):\n return False\n return invalid_pass, invalid_user\n\n def forgot_password(self):\n if not self.click(self.FORGOT_PASSWORD_LINK):\n return False\n return True\n",
"step-4": "from ui.pages import BasePage\nfrom ui.locators.login_page_locators import LoginPageLocators\n\n\nclass LoginPage(BasePage, LoginPageLocators):\n\n def __init__(self, driver=None):\n super(LoginPage, self).__init__(driver=driver)\n self.identifier = self.IDENTIFIER\n\n def login(self, email=None, password=None, remember_me=False):\n self.navigate()\n if not self.wait_for_page_to_load():\n return False\n if not email:\n email = self.std.login_user\n if not password:\n password = self.std.login_password\n if not self.set_text(self.USERNAME_INPUT, email):\n return False\n if not self.set_text(self.PASSWORD_INPUT, password):\n return False\n if remember_me:\n if not self.click(self.REMEMBER_CHECKBOX):\n return False\n if not self.click(self.SIGN_IN_BTN):\n return False\n return True\n\n def get_error_messages(self):\n invalid_user = self.get_text(self.USERNAME_MISSING_DIV)\n invalid_pass = self.get_text(self.PASSWORD_MISSING_DIV)\n if not (invalid_pass or invalid_user):\n return False\n return invalid_pass, invalid_user\n\n def forgot_password(self):\n if not self.click(self.FORGOT_PASSWORD_LINK):\n return False\n return True\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
import asyncio
import secrets
import pytest
from libp2p.host.ping import ID, PING_LENGTH
from libp2p.tools.factories import pair_of_connected_hosts
@pytest.mark.asyncio
async def test_ping_once():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await stream.close()
SOME_PING_COUNT = 3
@pytest.mark.asyncio
async def test_ping_several():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
for _ in range(SOME_PING_COUNT):
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
# NOTE: simulate some time to sleep to mirror a real
# world usage where a peer sends pings on some periodic interval
# NOTE: this interval can be `0` for this test.
await asyncio.sleep(0)
await stream.close()
|
normal
|
{
"blob_id": "0233b46da3b9351f110ffc7f8622ca8f9ee9944d",
"index": 3000,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.asyncio\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\n<mask token>\n\n\n@pytest.mark.asyncio\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-3": "<mask token>\n\n\n@pytest.mark.asyncio\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\n@pytest.mark.asyncio\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-4": "import asyncio\nimport secrets\nimport pytest\nfrom libp2p.host.ping import ID, PING_LENGTH\nfrom libp2p.tools.factories import pair_of_connected_hosts\n\n\n@pytest.mark.asyncio\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\n@pytest.mark.asyncio\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-5": "import asyncio\nimport secrets\n\nimport pytest\n\nfrom libp2p.host.ping import ID, PING_LENGTH\nfrom libp2p.tools.factories import pair_of_connected_hosts\n\n\n@pytest.mark.asyncio\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\n@pytest.mark.asyncio\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n # NOTE: simulate some time to sleep to mirror a real\n # world usage where a peer sends pings on some periodic interval\n # NOTE: this interval can be `0` for this test.\n await asyncio.sleep(0)\n await stream.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
ss = str(input())
print(len(ss) - ss.count(' '))
|
normal
|
{
"blob_id": "7f72f6a2ff0c7ceacb0f893d04c20402e850421a",
"index": 1840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(len(ss) - ss.count(' '))\n",
"step-3": "ss = str(input())\nprint(len(ss) - ss.count(' '))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import datetime
import logging
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from dagster import check
from dagster.core.utils import coerce_valid_log_level, make_new_run_id
if TYPE_CHECKING:
from dagster.core.events import DagsterEvent
DAGSTER_META_KEY = "dagster_meta"
class DagsterMessageProps(
NamedTuple(
"_DagsterMessageProps",
[
("orig_message", Optional[str]),
("log_message_id", Optional[str]),
("log_timestamp", Optional[str]),
("dagster_event", Optional[Any]),
],
)
):
"""Internal class used to represent specific attributes about a logged message"""
def __new__(
cls,
orig_message: str,
log_message_id: Optional[str] = None,
log_timestamp: Optional[str] = None,
dagster_event: Optional["DagsterEvent"] = None,
):
return super().__new__(
cls,
orig_message=check.str_param(orig_message, "orig_message"),
log_message_id=check.opt_str_param(
log_message_id, "log_message_id", default=make_new_run_id()
),
log_timestamp=check.opt_str_param(
log_timestamp, "log_timestamp", default=datetime.datetime.utcnow().isoformat()
),
dagster_event=dagster_event,
)
@property
def error_str(self) -> Optional[str]:
if self.dagster_event is None:
return None
event_specific_data = self.dagster_event.event_specific_data
if not event_specific_data:
return None
error = getattr(event_specific_data, "error", None)
if error:
return "\n\n" + getattr(event_specific_data, "error_display_string", error.to_string())
return None
@property
def pid(self) -> Optional[str]:
if self.dagster_event is None or self.dagster_event.pid is None:
return None
return str(self.dagster_event.pid)
@property
def step_key(self) -> Optional[str]:
if self.dagster_event is None:
return None
return self.dagster_event.step_key
@property
def event_type_value(self) -> Optional[str]:
if self.dagster_event is None:
return None
return self.dagster_event.event_type_value
class DagsterLoggingMetadata(
NamedTuple(
"_DagsterLoggingMetadata",
[
("run_id", Optional[str]),
("pipeline_name", Optional[str]),
("pipeline_tags", Dict[str, str]),
("step_key", Optional[str]),
("solid_name", Optional[str]),
("resource_name", Optional[str]),
("resource_fn_name", Optional[str]),
],
)
):
"""Internal class used to represent the context in which a given message was logged (i.e. the
step, pipeline run, resource, etc.)
"""
def __new__(
cls,
run_id: str = None,
pipeline_name: str = None,
pipeline_tags: Dict[str, str] = None,
step_key: str = None,
solid_name: str = None,
resource_name: str = None,
resource_fn_name: str = None,
):
return super().__new__(
cls,
run_id=run_id,
pipeline_name=pipeline_name,
pipeline_tags=pipeline_tags or {},
step_key=step_key,
solid_name=solid_name,
resource_name=resource_name,
resource_fn_name=resource_fn_name,
)
@property
def log_source(self):
if self.resource_name is None:
return self.pipeline_name or "system"
return f"resource:{self.resource_name}"
def to_tags(self) -> Dict[str, str]:
# converts all values into strings
return {k: str(v) for k, v in self._asdict().items()}
def construct_log_string(
logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps
) -> str:
return (
" - ".join(
filter(
None,
(
logging_metadata.log_source,
logging_metadata.run_id,
message_props.pid,
logging_metadata.step_key,
message_props.event_type_value,
message_props.orig_message,
),
)
)
+ (message_props.error_str or "")
)
class DagsterLogManager(logging.Logger):
def __init__(
self,
logging_metadata: DagsterLoggingMetadata,
loggers: List[logging.Logger],
handlers: Optional[List[logging.Handler]] = None,
):
self._logging_metadata = check.inst_param(
logging_metadata, "logging_metadata", DagsterLoggingMetadata
)
self._loggers = check.list_param(loggers, "loggers", of_type=logging.Logger)
super().__init__(name="dagster", level=logging.DEBUG)
handlers = check.opt_list_param(handlers, "handlers", of_type=logging.Handler)
for handler in handlers:
self.addHandler(handler)
@property
def logging_metadata(self) -> DagsterLoggingMetadata:
return self._logging_metadata
@property
def loggers(self) -> List[logging.Logger]:
return self._loggers
def log_dagster_event(self, level: int, msg: str, dagster_event: "DagsterEvent"):
self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})
def log(self, level, msg, *args, **kwargs):
# allow for string level names
super().log(coerce_valid_log_level(level), msg, *args, **kwargs)
def _log(
self, level, msg, args, exc_info=None, extra=None, stack_info=False
): # pylint: disable=arguments-differ
# we stash dagster meta information in the extra field
extra = extra or {}
dagster_message_props = DagsterMessageProps(
orig_message=msg, dagster_event=extra.get(DAGSTER_META_KEY)
)
# convert the message to our preferred format
msg = construct_log_string(self.logging_metadata, dagster_message_props)
# combine all dagster meta information into a single dictionary
meta_dict = {
**self.logging_metadata._asdict(),
**dagster_message_props._asdict(),
}
# step-level events can be logged from a pipeline context. for these cases, pull the step
# key from the underlying DagsterEvent
if meta_dict["step_key"] is None:
meta_dict["step_key"] = dagster_message_props.step_key
extra[DAGSTER_META_KEY] = meta_dict
for logger in self._loggers:
logger.log(level, msg, *args, extra=extra)
super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)
def with_tags(self, **new_tags):
"""Add new tags in "new_tags" to the set of tags attached to this log manager instance, and
return a new DagsterLogManager with the merged set of tags.
Args:
tags (Dict[str,str]): Dictionary of tags
Returns:
DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same
run ID and loggers.
"""
return DagsterLogManager(
logging_metadata=self.logging_metadata._replace(**new_tags),
loggers=self._loggers,
handlers=self.handlers,
)
|
normal
|
{
"blob_id": "f900e08c06ae736f5e32ac748e282700f9d0a969",
"index": 7922,
"step-1": "<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\n<mask token>\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached 
to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-2": "<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n <mask token>\n\n def __new__(cls, orig_message: str, log_message_id: Optional[str]=None,\n log_timestamp: Optional[str]=None, dagster_event: Optional[\n 'DagsterEvent']=None):\n return super().__new__(cls, orig_message=check.str_param(\n orig_message, 'orig_message'), log_message_id=check.\n opt_str_param(log_message_id, 'log_message_id', default=\n make_new_run_id()), log_timestamp=check.opt_str_param(\n log_timestamp, 'log_timestamp', default=datetime.datetime.\n utcnow().isoformat()), dagster_event=dagster_event)\n <mask token>\n\n @property\n def pid(self) ->Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\n<mask token>\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = 
DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-3": "<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n \"\"\"Internal class used to represent specific attributes about a logged message\"\"\"\n\n def __new__(cls, orig_message: str, log_message_id: Optional[str]=None,\n log_timestamp: Optional[str]=None, dagster_event: Optional[\n 'DagsterEvent']=None):\n return super().__new__(cls, orig_message=check.str_param(\n orig_message, 'orig_message'), log_message_id=check.\n opt_str_param(log_message_id, 'log_message_id', default=\n make_new_run_id()), log_timestamp=check.opt_str_param(\n log_timestamp, 'log_timestamp', default=datetime.datetime.\n utcnow().isoformat()), dagster_event=dagster_event)\n\n @property\n def error_str(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n event_specific_data = self.dagster_event.event_specific_data\n if not event_specific_data:\n return None\n error = getattr(event_specific_data, 'error', None)\n if error:\n return '\\n\\n' + getattr(event_specific_data,\n 'error_display_string', error.to_string())\n return None\n\n @property\n def pid(self) ->Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. 
the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\ndef construct_log_string(logging_metadata: DagsterLoggingMetadata,\n message_props: DagsterMessageProps) ->str:\n return ' - '.join(filter(None, (logging_metadata.log_source,\n logging_metadata.run_id, message_props.pid, logging_metadata.\n step_key, message_props.event_type_value, message_props.orig_message))\n ) + (message_props.error_str or '')\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-4": "<mask token>\nif TYPE_CHECKING:\n from dagster.core.events import DagsterEvent\n<mask token>\n\n\nclass DagsterMessageProps(NamedTuple('_DagsterMessageProps', [(\n 'orig_message', Optional[str]), ('log_message_id', Optional[str]), (\n 'log_timestamp', Optional[str]), ('dagster_event', Optional[Any])])):\n \"\"\"Internal class used to represent specific attributes about a logged message\"\"\"\n\n def __new__(cls, orig_message: str, log_message_id: Optional[str]=None,\n log_timestamp: Optional[str]=None, dagster_event: Optional[\n 'DagsterEvent']=None):\n return super().__new__(cls, orig_message=check.str_param(\n orig_message, 'orig_message'), log_message_id=check.\n opt_str_param(log_message_id, 'log_message_id', default=\n make_new_run_id()), log_timestamp=check.opt_str_param(\n log_timestamp, 'log_timestamp', default=datetime.datetime.\n utcnow().isoformat()), dagster_event=dagster_event)\n\n @property\n def error_str(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n event_specific_data = self.dagster_event.event_specific_data\n if not event_specific_data:\n return None\n error = getattr(event_specific_data, 'error', None)\n if error:\n return '\\n\\n' + getattr(event_specific_data,\n 'error_display_string', error.to_string())\n return None\n\n @property\n def pid(self) ->Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) ->Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(NamedTuple('_DagsterLoggingMetadata', [(\n 'run_id', Optional[str]), ('pipeline_name', Optional[str]), (\n 'pipeline_tags', Dict[str, str]), ('step_key', Optional[str]), (\n 'solid_name', Optional[str]), ('resource_name', Optional[str]), (\n 'resource_fn_name', Optional[str])])):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. 
the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(cls, run_id: str=None, pipeline_name: str=None,\n pipeline_tags: Dict[str, str]=None, step_key: str=None, solid_name:\n str=None, resource_name: str=None, resource_fn_name: str=None):\n return super().__new__(cls, run_id=run_id, pipeline_name=\n pipeline_name, pipeline_tags=pipeline_tags or {}, step_key=\n step_key, solid_name=solid_name, resource_name=resource_name,\n resource_fn_name=resource_fn_name)\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or 'system'\n return f'resource:{self.resource_name}'\n\n def to_tags(self) ->Dict[str, str]:\n return {k: str(v) for k, v in self._asdict().items()}\n\n\ndef construct_log_string(logging_metadata: DagsterLoggingMetadata,\n message_props: DagsterMessageProps) ->str:\n return ' - '.join(filter(None, (logging_metadata.log_source,\n logging_metadata.run_id, message_props.pid, logging_metadata.\n step_key, message_props.event_type_value, message_props.orig_message))\n ) + (message_props.error_str or '')\n\n\nclass DagsterLogManager(logging.Logger):\n\n def __init__(self, logging_metadata: DagsterLoggingMetadata, loggers:\n List[logging.Logger], handlers: Optional[List[logging.Handler]]=None):\n self._logging_metadata = check.inst_param(logging_metadata,\n 'logging_metadata', DagsterLoggingMetadata)\n self._loggers = check.list_param(loggers, 'loggers', of_type=\n logging.Logger)\n super().__init__(name='dagster', level=logging.DEBUG)\n handlers = check.opt_list_param(handlers, 'handlers', of_type=\n logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) ->DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) ->List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event:\n 'DagsterEvent'):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=\n False):\n extra = extra or {}\n dagster_message_props = DagsterMessageProps(orig_message=msg,\n dagster_event=extra.get(DAGSTER_META_KEY))\n msg = construct_log_string(self.logging_metadata, dagster_message_props\n )\n meta_dict = {**self.logging_metadata._asdict(), **\n dagster_message_props._asdict()}\n if meta_dict['step_key'] is None:\n meta_dict['step_key'] = dagster_message_props.step_key\n extra[DAGSTER_META_KEY] = meta_dict\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n super()._log(level, msg, args, exc_info=exc_info, extra=extra,\n stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(logging_metadata=self.logging_metadata.\n _replace(**new_tags), loggers=self._loggers, handlers=self.handlers\n )\n",
"step-5": "import datetime\nimport logging\nfrom typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom dagster import check\nfrom dagster.core.utils import coerce_valid_log_level, make_new_run_id\n\nif TYPE_CHECKING:\n from dagster.core.events import DagsterEvent\n\nDAGSTER_META_KEY = \"dagster_meta\"\n\n\nclass DagsterMessageProps(\n NamedTuple(\n \"_DagsterMessageProps\",\n [\n (\"orig_message\", Optional[str]),\n (\"log_message_id\", Optional[str]),\n (\"log_timestamp\", Optional[str]),\n (\"dagster_event\", Optional[Any]),\n ],\n )\n):\n \"\"\"Internal class used to represent specific attributes about a logged message\"\"\"\n\n def __new__(\n cls,\n orig_message: str,\n log_message_id: Optional[str] = None,\n log_timestamp: Optional[str] = None,\n dagster_event: Optional[\"DagsterEvent\"] = None,\n ):\n return super().__new__(\n cls,\n orig_message=check.str_param(orig_message, \"orig_message\"),\n log_message_id=check.opt_str_param(\n log_message_id, \"log_message_id\", default=make_new_run_id()\n ),\n log_timestamp=check.opt_str_param(\n log_timestamp, \"log_timestamp\", default=datetime.datetime.utcnow().isoformat()\n ),\n dagster_event=dagster_event,\n )\n\n @property\n def error_str(self) -> Optional[str]:\n if self.dagster_event is None:\n return None\n\n event_specific_data = self.dagster_event.event_specific_data\n if not event_specific_data:\n return None\n\n error = getattr(event_specific_data, \"error\", None)\n if error:\n return \"\\n\\n\" + getattr(event_specific_data, \"error_display_string\", error.to_string())\n return None\n\n @property\n def pid(self) -> Optional[str]:\n if self.dagster_event is None or self.dagster_event.pid is None:\n return None\n return str(self.dagster_event.pid)\n\n @property\n def step_key(self) -> Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.step_key\n\n @property\n def event_type_value(self) -> Optional[str]:\n if self.dagster_event is None:\n return None\n return self.dagster_event.event_type_value\n\n\nclass DagsterLoggingMetadata(\n NamedTuple(\n \"_DagsterLoggingMetadata\",\n [\n (\"run_id\", Optional[str]),\n (\"pipeline_name\", Optional[str]),\n (\"pipeline_tags\", Dict[str, str]),\n (\"step_key\", Optional[str]),\n (\"solid_name\", Optional[str]),\n (\"resource_name\", Optional[str]),\n (\"resource_fn_name\", Optional[str]),\n ],\n )\n):\n \"\"\"Internal class used to represent the context in which a given message was logged (i.e. 
the\n step, pipeline run, resource, etc.)\n \"\"\"\n\n def __new__(\n cls,\n run_id: str = None,\n pipeline_name: str = None,\n pipeline_tags: Dict[str, str] = None,\n step_key: str = None,\n solid_name: str = None,\n resource_name: str = None,\n resource_fn_name: str = None,\n ):\n return super().__new__(\n cls,\n run_id=run_id,\n pipeline_name=pipeline_name,\n pipeline_tags=pipeline_tags or {},\n step_key=step_key,\n solid_name=solid_name,\n resource_name=resource_name,\n resource_fn_name=resource_fn_name,\n )\n\n @property\n def log_source(self):\n if self.resource_name is None:\n return self.pipeline_name or \"system\"\n return f\"resource:{self.resource_name}\"\n\n def to_tags(self) -> Dict[str, str]:\n # converts all values into strings\n return {k: str(v) for k, v in self._asdict().items()}\n\n\ndef construct_log_string(\n logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps\n) -> str:\n\n return (\n \" - \".join(\n filter(\n None,\n (\n logging_metadata.log_source,\n logging_metadata.run_id,\n message_props.pid,\n logging_metadata.step_key,\n message_props.event_type_value,\n message_props.orig_message,\n ),\n )\n )\n + (message_props.error_str or \"\")\n )\n\n\nclass DagsterLogManager(logging.Logger):\n def __init__(\n self,\n logging_metadata: DagsterLoggingMetadata,\n loggers: List[logging.Logger],\n handlers: Optional[List[logging.Handler]] = None,\n ):\n self._logging_metadata = check.inst_param(\n logging_metadata, \"logging_metadata\", DagsterLoggingMetadata\n )\n self._loggers = check.list_param(loggers, \"loggers\", of_type=logging.Logger)\n\n super().__init__(name=\"dagster\", level=logging.DEBUG)\n\n handlers = check.opt_list_param(handlers, \"handlers\", of_type=logging.Handler)\n for handler in handlers:\n self.addHandler(handler)\n\n @property\n def logging_metadata(self) -> DagsterLoggingMetadata:\n return self._logging_metadata\n\n @property\n def loggers(self) -> List[logging.Logger]:\n return self._loggers\n\n def log_dagster_event(self, level: int, msg: str, dagster_event: \"DagsterEvent\"):\n self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})\n\n def log(self, level, msg, *args, **kwargs):\n # allow for string level names\n super().log(coerce_valid_log_level(level), msg, *args, **kwargs)\n\n def _log(\n self, level, msg, args, exc_info=None, extra=None, stack_info=False\n ): # pylint: disable=arguments-differ\n\n # we stash dagster meta information in the extra field\n extra = extra or {}\n\n dagster_message_props = DagsterMessageProps(\n orig_message=msg, dagster_event=extra.get(DAGSTER_META_KEY)\n )\n\n # convert the message to our preferred format\n msg = construct_log_string(self.logging_metadata, dagster_message_props)\n\n # combine all dagster meta information into a single dictionary\n meta_dict = {\n **self.logging_metadata._asdict(),\n **dagster_message_props._asdict(),\n }\n # step-level events can be logged from a pipeline context. 
for these cases, pull the step\n # key from the underlying DagsterEvent\n if meta_dict[\"step_key\"] is None:\n meta_dict[\"step_key\"] = dagster_message_props.step_key\n\n extra[DAGSTER_META_KEY] = meta_dict\n\n for logger in self._loggers:\n logger.log(level, msg, *args, extra=extra)\n\n super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)\n\n def with_tags(self, **new_tags):\n \"\"\"Add new tags in \"new_tags\" to the set of tags attached to this log manager instance, and\n return a new DagsterLogManager with the merged set of tags.\n\n Args:\n tags (Dict[str,str]): Dictionary of tags\n\n Returns:\n DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same\n run ID and loggers.\n \"\"\"\n return DagsterLogManager(\n logging_metadata=self.logging_metadata._replace(**new_tags),\n loggers=self._loggers,\n handlers=self.handlers,\n )\n",
"step-ids": [
16,
18,
21,
22,
25
]
}
|
[
16,
18,
21,
22,
25
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def build_shift_dict(self, shift):
"""
Creates a dictionary that can be used to apply a cipher to a letter.
The dictionary maps every uppercase and lowercase letter to a
character shifted down the alphabet by the input shift. The dictionary
should have 52 keys of all the uppercase letters and all the lowercase
letters only.
shift (integer): the amount by which to shift every letter of the
alphabet. 0 <= shift < 26
Returns: a dictionary mapping a letter (string) to
another letter (string).
"""
shifted_lowercase = list(string.ascii_lowercase[shift:]) + list(string.
ascii_lowercase[:shift])
shifted_uppercase = list(string.ascii_uppercase[shift:]) + list(string.
ascii_uppercase[:shift])
d = {}
for l in range(len(string.ascii_lowercase)):
d[string.ascii_lowercase[l]] = shifted_lowercase[l]
for l in range(len(string.ascii_uppercase)):
d[string.ascii_uppercase[l]] = shifted_uppercase[l]
return d
<|reserved_special_token_1|>
def build_shift_dict(self, shift):
'''
Creates a dictionary that can be used to apply a cipher to a letter.
The dictionary maps every uppercase and lowercase letter to a
character shifted down the alphabet by the input shift. The dictionary
should have 52 keys of all the uppercase letters and all the lowercase
letters only.
shift (integer): the amount by which to shift every letter of the
alphabet. 0 <= shift < 26
Returns: a dictionary mapping a letter (string) to
another letter (string).
'''
# create a new list of letters based on the shift
shifted_lowercase = list(string.ascii_lowercase[shift:]) + list(string.ascii_lowercase[:shift])
shifted_uppercase = list(string.ascii_uppercase[shift:]) + list(string.ascii_uppercase[:shift])
# empty dict
d = {}
# populate dict for lowercase
for l in range(len(string.ascii_lowercase)):
d[string.ascii_lowercase[l]] = shifted_lowercase[l]
# populate dict for uppercase
for l in range(len(string.ascii_uppercase)):
d[string.ascii_uppercase[l]] = shifted_uppercase[l]
return d
|
flexible
|
{
"blob_id": "07d2da14d0122ad2c8407bb13b8567ca62356bef",
"index": 7515,
"step-1": "<mask token>\n",
"step-2": "def build_shift_dict(self, shift):\n \"\"\"\n Creates a dictionary that can be used to apply a cipher to a letter.\n The dictionary maps every uppercase and lowercase letter to a\n character shifted down the alphabet by the input shift. The dictionary\n should have 52 keys of all the uppercase letters and all the lowercase\n letters only.\n\n shift (integer): the amount by which to shift every letter of the\n alphabet. 0 <= shift < 26\n\n Returns: a dictionary mapping a letter (string) to\n another letter (string).\n \"\"\"\n shifted_lowercase = list(string.ascii_lowercase[shift:]) + list(string.\n ascii_lowercase[:shift])\n shifted_uppercase = list(string.ascii_uppercase[shift:]) + list(string.\n ascii_uppercase[:shift])\n d = {}\n for l in range(len(string.ascii_lowercase)):\n d[string.ascii_lowercase[l]] = shifted_lowercase[l]\n for l in range(len(string.ascii_uppercase)):\n d[string.ascii_uppercase[l]] = shifted_uppercase[l]\n return d\n",
"step-3": "def build_shift_dict(self, shift):\n '''\n Creates a dictionary that can be used to apply a cipher to a letter.\n The dictionary maps every uppercase and lowercase letter to a\n character shifted down the alphabet by the input shift. The dictionary\n should have 52 keys of all the uppercase letters and all the lowercase\n letters only.\n\n shift (integer): the amount by which to shift every letter of the\n alphabet. 0 <= shift < 26\n\n Returns: a dictionary mapping a letter (string) to\n another letter (string).\n '''\n # create a new list of letters based on the shift\n shifted_lowercase = list(string.ascii_lowercase[shift:]) + list(string.ascii_lowercase[:shift])\n shifted_uppercase = list(string.ascii_uppercase[shift:]) + list(string.ascii_uppercase[:shift])\n\n # empty dict\n d = {}\n\n # populate dict for lowercase\n for l in range(len(string.ascii_lowercase)):\n d[string.ascii_lowercase[l]] = shifted_lowercase[l]\n\n # populate dict for uppercase\n for l in range(len(string.ascii_uppercase)):\n d[string.ascii_uppercase[l]] = shifted_uppercase[l]\n\n return d",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_account_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
data = {}
data['genre'] = genre.pk
data['account'] = request.user.pk
serializer = AccountGenreSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def detail_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = GenreSerializer(genre)
return Response(serializer.data)
@api_view(['POST'])
def create_song_view(request):
serializer = SongSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
<|reserved_special_token_0|>
@api_view(['PUT'])
def update_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = SongSerializer(song, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data['success'] = 'update successful'
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_account_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
data = {}
data['genre'] = genre.pk
data['account'] = request.user.pk
serializer = AccountGenreSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def detail_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = GenreSerializer(genre)
return Response(serializer.data)
@api_view(['POST'])
def create_song_view(request):
serializer = SongSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
<|reserved_special_token_0|>
@api_view(['PUT'])
def update_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = SongSerializer(song, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data['success'] = 'update successful'
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE'])
def delete_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = song.delete()
data = {}
if operation:
data['success'] = 'delete successful'
else:
data['failure'] = 'delete failure'
return Response(data=data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_account_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
data = {}
data['genre'] = genre.pk
data['account'] = request.user.pk
serializer = AccountGenreSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def detail_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = GenreSerializer(genre)
return Response(serializer.data)
@api_view(['POST'])
def create_song_view(request):
serializer = SongSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def detail_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = SongSerializer(song)
return Response(serializer.data)
@api_view(['PUT'])
def update_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = SongSerializer(song, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data['success'] = 'update successful'
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE'])
def delete_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = song.delete()
data = {}
if operation:
data['success'] = 'delete successful'
else:
data['failure'] = 'delete failure'
return Response(data=data)
<|reserved_special_token_1|>
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from playlist.models import Song, AccountSong, Genre, AccountGenre
from account.models import Account
from playlist.api.serializers import GenreSerializer, SongSerializer, AccountGenreSerializer
from rest_framework.generics import ListAPIView
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_account_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
data = {}
data['genre'] = genre.pk
data['account'] = request.user.pk
serializer = AccountGenreSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def detail_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = GenreSerializer(genre)
return Response(serializer.data)
@api_view(['POST'])
def create_song_view(request):
serializer = SongSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def detail_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = SongSerializer(song)
return Response(serializer.data)
@api_view(['PUT'])
def update_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = SongSerializer(song, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data['success'] = 'update successful'
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE'])
def delete_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = song.delete()
data = {}
if operation:
data['success'] = 'delete successful'
else:
data['failure'] = 'delete failure'
return Response(data=data)
<|reserved_special_token_1|>
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from playlist.models import Song, AccountSong, Genre, AccountGenre
from account.models import Account
from playlist.api.serializers import GenreSerializer, SongSerializer, AccountGenreSerializer
from rest_framework.generics import ListAPIView
# create a specific genre details by title
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_account_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
data = {}
data['genre'] = genre.pk
data['account'] = request.user.pk
serializer = AccountGenreSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Get a specific genre details by title
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def detail_genre_view(request):
title = request.data.get('title', '0')
try:
genre = Genre.objects.get(title=title)
except Genre.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = GenreSerializer(genre)
return Response(serializer.data)
# post a new song
@api_view(['POST'])
#admin
def create_song_view(request):
serializer = SongSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Get a specific song details by id
@api_view(['GET'])
def detail_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = SongSerializer(song)
return Response(serializer.data)
# update a specific song details by id
@api_view(['PUT'])
#admin
def update_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "PUT":
serializer = SongSerializer(song, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data["success"] = "update successful"
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# delete a specific song details by id
@api_view(['DELETE'])
#admin
def delete_song_view(request):
id = request.data.get('id', '0')
try:
song = Song.objects.get(id=id)
except Song.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "DELETE":
operation = song.delete()
data = {}
if operation:
data["success"] = "delete successful"
else:
data["failure"] = "delete failure"
return Response(data=data)
# GET all genres
# GET all account genres
# DELETE all account genres
# GET all songs (by account genres)
# POST liked genres
# POST liked songs
# GET whatsapp link
# POST create playlist
|
flexible
|
{
"blob_id": "ff53a549222b0d5e2fcb518c1e44b656c45ce76e",
"index": 5183,
"step-1": "<mask token>\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef create_account_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n data = {}\n data['genre'] = genre.pk\n data['account'] = request.user.pk\n serializer = AccountGenreSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\n@permission_classes((IsAuthenticated,))\ndef detail_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = GenreSerializer(genre)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef create_song_view(request):\n serializer = SongSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n<mask token>\n\n\n@api_view(['PUT'])\ndef update_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'PUT':\n serializer = SongSerializer(song, data=request.data)\n data = {}\n if serializer.is_valid():\n serializer.save()\n data['success'] = 'update successful'\n return Response(data=data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef create_account_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n data = {}\n data['genre'] = genre.pk\n data['account'] = request.user.pk\n serializer = AccountGenreSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\n@permission_classes((IsAuthenticated,))\ndef detail_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = GenreSerializer(genre)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef create_song_view(request):\n serializer = SongSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n<mask token>\n\n\n@api_view(['PUT'])\ndef update_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'PUT':\n serializer = SongSerializer(song, data=request.data)\n data = {}\n if serializer.is_valid():\n serializer.save()\n data['success'] = 'update successful'\n return Response(data=data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\ndef delete_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'DELETE':\n operation = song.delete()\n data = {}\n if operation:\n data['success'] = 'delete successful'\n else:\n data['failure'] = 'delete failure'\n return Response(data=data)\n",
"step-3": "<mask token>\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef create_account_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n data = {}\n data['genre'] = genre.pk\n data['account'] = request.user.pk\n serializer = AccountGenreSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\n@permission_classes((IsAuthenticated,))\ndef detail_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = GenreSerializer(genre)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef create_song_view(request):\n serializer = SongSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef detail_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = SongSerializer(song)\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\ndef update_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'PUT':\n serializer = SongSerializer(song, data=request.data)\n data = {}\n if serializer.is_valid():\n serializer.save()\n data['success'] = 'update successful'\n return Response(data=data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\ndef delete_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'DELETE':\n operation = song.delete()\n data = {}\n if operation:\n data['success'] = 'delete successful'\n else:\n data['failure'] = 'delete failure'\n return Response(data=data)\n",
"step-4": "from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom playlist.models import Song, AccountSong, Genre, AccountGenre\nfrom account.models import Account\nfrom playlist.api.serializers import GenreSerializer, SongSerializer, AccountGenreSerializer\nfrom rest_framework.generics import ListAPIView\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef create_account_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n data = {}\n data['genre'] = genre.pk\n data['account'] = request.user.pk\n serializer = AccountGenreSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\n@permission_classes((IsAuthenticated,))\ndef detail_genre_view(request):\n title = request.data.get('title', '0')\n try:\n genre = Genre.objects.get(title=title)\n except Genre.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = GenreSerializer(genre)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef create_song_view(request):\n serializer = SongSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef detail_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = SongSerializer(song)\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\ndef update_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'PUT':\n serializer = SongSerializer(song, data=request.data)\n data = {}\n if serializer.is_valid():\n serializer.save()\n data['success'] = 'update successful'\n return Response(data=data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\ndef delete_song_view(request):\n id = request.data.get('id', '0')\n try:\n song = Song.objects.get(id=id)\n except Song.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n if request.method == 'DELETE':\n operation = song.delete()\n data = {}\n if operation:\n data['success'] = 'delete successful'\n else:\n data['failure'] = 'delete failure'\n return Response(data=data)\n",
"step-5": "from rest_framework import status\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.decorators import api_view, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\n\r\nfrom playlist.models import Song, AccountSong, Genre, AccountGenre\r\nfrom account.models import Account\r\nfrom playlist.api.serializers import GenreSerializer, SongSerializer, AccountGenreSerializer\r\nfrom rest_framework.generics import ListAPIView\r\n\r\n\r\n# create a specific genre details by title\r\n@api_view(['POST'])\r\n@permission_classes((IsAuthenticated,))\r\ndef create_account_genre_view(request):\r\n title = request.data.get('title', '0')\r\n try:\r\n genre = Genre.objects.get(title=title)\r\n except Genre.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n data = {}\r\n data['genre'] = genre.pk\r\n data['account'] = request.user.pk\r\n serializer = AccountGenreSerializer(data=data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n# Get a specific genre details by title\r\n@api_view(['GET'])\r\n@permission_classes((IsAuthenticated,))\r\ndef detail_genre_view(request):\r\n title = request.data.get('title', '0')\r\n try:\r\n genre = Genre.objects.get(title=title)\r\n except Genre.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n serializer = GenreSerializer(genre)\r\n return Response(serializer.data)\r\n\r\n\r\n# post a new song\r\n@api_view(['POST'])\r\n#admin\r\ndef create_song_view(request):\r\n serializer = SongSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n# Get a specific song details by id\r\n@api_view(['GET'])\r\ndef detail_song_view(request):\r\n id = request.data.get('id', '0')\r\n try:\r\n song = Song.objects.get(id=id)\r\n except Song.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n serializer = SongSerializer(song)\r\n return Response(serializer.data)\r\n\r\n\r\n# update a specific song details by id\r\n@api_view(['PUT'])\r\n#admin\r\ndef update_song_view(request):\r\n id = request.data.get('id', '0')\r\n try:\r\n song = Song.objects.get(id=id)\r\n except Song.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == \"PUT\":\r\n serializer = SongSerializer(song, data=request.data)\r\n data = {}\r\n if serializer.is_valid():\r\n serializer.save()\r\n data[\"success\"] = \"update successful\"\r\n return Response(data=data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n# delete a specific song details by id\r\n@api_view(['DELETE'])\r\n#admin\r\ndef delete_song_view(request):\r\n id = request.data.get('id', '0')\r\n try:\r\n song = Song.objects.get(id=id)\r\n except Song.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == \"DELETE\":\r\n operation = song.delete()\r\n data = {}\r\n if operation:\r\n data[\"success\"] = \"delete successful\"\r\n else:\r\n data[\"failure\"] = \"delete failure\"\r\n return Response(data=data)\r\n\r\n\r\n# GET all genres\r\n# GET all account genres\r\n# DELETE all account genres\r\n# GET all songs (by account genres)\r\n# POST liked genres\r\n# POST liked songs\r\n# GET whatsapp link\r\n# 
POST create playlist\r\n\r\n\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
flow_data.registerTempTable('flowtab')
<|reserved_special_token_0|>
df.show(1000)
<|reserved_special_token_0|>
df.show(1000)
<|reserved_special_token_0|>
for dstip in dstIPs:
sql = (
"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like '"
+ dstip + "' group by src_address, dst_address")
print(sql)
sqlContext.sql(sql).show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sqlContext = SQLContext(sc)
flow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx,
'/tetration/flows/', 'PARQUET', 'LASTHOUR')
flow_data.registerTempTable('flowtab')
df = sqlContext.sql(
"select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address"
)
df.show(1000)
df = sqlContext.sql(
"select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address"
)
df.show(1000)
dstIPs = df.rdd.map(lambda p: '' + p.dst_address).collect()
for dstip in dstIPs:
sql = (
"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like '"
+ dstip + "' group by src_address, dst_address")
print(sql)
sqlContext.sql(sql).show()
<|reserved_special_token_1|>
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
flow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx,
'/tetration/flows/', 'PARQUET', 'LASTHOUR')
flow_data.registerTempTable('flowtab')
df = sqlContext.sql(
"select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address"
)
df.show(1000)
df = sqlContext.sql(
"select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address"
)
df.show(1000)
dstIPs = df.rdd.map(lambda p: '' + p.dst_address).collect()
for dstip in dstIPs:
sql = (
"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like '"
+ dstip + "' group by src_address, dst_address")
print(sql)
sqlContext.sql(sql).show()
<|reserved_special_token_1|>
# Simple read based on the py _sql context
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
flow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx, "/tetration/flows/", "PARQUET", "LASTHOUR")
flow_data.registerTempTable("flowtab")
# show the unique src_address and dst_address pairs
df = sqlContext.sql("select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address")
df.show(1000)
# show the unique dst_addresses
df = sqlContext.sql("select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address")
df.show(1000)
# show the sum of fwd_bytes of each dst_address
dstIPs = df.rdd.map(lambda p: "" + p.dst_address).collect()
for dstip in dstIPs:
sql = "select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like \'" + dstip + "\' group by src_address, dst_address"
print(sql)
sqlContext.sql(sql).show()
|
flexible
|
{
"blob_id": "691075aa5c629e2d0c486ec288cd39bc142cdc7a",
"index": 3448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nflow_data.registerTempTable('flowtab')\n<mask token>\ndf.show(1000)\n<mask token>\ndf.show(1000)\n<mask token>\nfor dstip in dstIPs:\n sql = (\n \"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like '\"\n + dstip + \"' group by src_address, dst_address\")\n print(sql)\n sqlContext.sql(sql).show()\n",
"step-3": "<mask token>\nsqlContext = SQLContext(sc)\nflow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx,\n '/tetration/flows/', 'PARQUET', 'LASTHOUR')\nflow_data.registerTempTable('flowtab')\ndf = sqlContext.sql(\n \"select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address\"\n )\ndf.show(1000)\ndf = sqlContext.sql(\n \"select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address\"\n )\ndf.show(1000)\ndstIPs = df.rdd.map(lambda p: '' + p.dst_address).collect()\nfor dstip in dstIPs:\n sql = (\n \"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like '\"\n + dstip + \"' group by src_address, dst_address\")\n print(sql)\n sqlContext.sql(sql).show()\n",
"step-4": "from pyspark.sql import SQLContext\nsqlContext = SQLContext(sc)\nflow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx,\n '/tetration/flows/', 'PARQUET', 'LASTHOUR')\nflow_data.registerTempTable('flowtab')\ndf = sqlContext.sql(\n \"select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address\"\n )\ndf.show(1000)\ndf = sqlContext.sql(\n \"select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address\"\n )\ndf.show(1000)\ndstIPs = df.rdd.map(lambda p: '' + p.dst_address).collect()\nfor dstip in dstIPs:\n sql = (\n \"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like '\"\n + dstip + \"' group by src_address, dst_address\")\n print(sql)\n sqlContext.sql(sql).show()\n",
"step-5": "# Simple read based on the py _sql context\nfrom pyspark.sql import SQLContext\nsqlContext = SQLContext(sc)\n\n\nflow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx, \"/tetration/flows/\", \"PARQUET\", \"LASTHOUR\")\nflow_data.registerTempTable(\"flowtab\")\n\n# show the unique src_address and dst_address pairs\ndf = sqlContext.sql(\"select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address\")\ndf.show(1000)\n\n# show the unique dst_addresses\ndf = sqlContext.sql(\"select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address\")\ndf.show(1000)\n\n# show the sum of fwd_bytes of each dst_address\ndstIPs = df.rdd.map(lambda p: \"\" + p.dst_address).collect()\nfor dstip in dstIPs:\n sql = \"select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like \\'\" + dstip + \"\\' group by src_address, dst_address\"\n print(sql)\n sqlContext.sql(sql).show()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def datingClassTest():
horatio = 0.1
data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')
normMat = KNN_3.autoNorm(data)
ml = normMat.shape[0]
numTestset = int(ml * horatio)
errorcount = 0
a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :],
3, datalabels[numTestset:ml])
for i in range(len(a)):
if a[i] != datalabels[i]:
errorcount += 1
c = errorcount / 100
return c
def predictperson():
level = ['not at all', 'in small does', 'in large does']
percenttats = float(input('percentage of time spent playing video games?'))
ffmiles = float(input('frequent flier miles earned per year?'))
icecream = float(input('liters of ice cream consumed per year?'))
data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')
normMat = KNN_3.autoNorm(data)
test_dataset = array([[percenttats, ffmiles, icecream]])
a = clf.classify0(test_dataset, data, 3, datalabels)
print(level[a[0] - 1])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def datingClassTest():
horatio = 0.1
data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')
normMat = KNN_3.autoNorm(data)
ml = normMat.shape[0]
numTestset = int(ml * horatio)
errorcount = 0
a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :],
3, datalabels[numTestset:ml])
for i in range(len(a)):
if a[i] != datalabels[i]:
errorcount += 1
c = errorcount / 100
return c
def predictperson():
level = ['not at all', 'in small does', 'in large does']
percenttats = float(input('percentage of time spent playing video games?'))
ffmiles = float(input('frequent flier miles earned per year?'))
icecream = float(input('liters of ice cream consumed per year?'))
data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')
normMat = KNN_3.autoNorm(data)
test_dataset = array([[percenttats, ffmiles, icecream]])
a = clf.classify0(test_dataset, data, 3, datalabels)
print(level[a[0] - 1])
predictperson()
<|reserved_special_token_1|>
from numpy import *
import KNN_1
import KNN_3
import KNN_suanfa as clf
def datingClassTest():
horatio = 0.1
data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')
normMat = KNN_3.autoNorm(data)
ml = normMat.shape[0]
numTestset = int(ml * horatio)
errorcount = 0
a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :],
3, datalabels[numTestset:ml])
for i in range(len(a)):
if a[i] != datalabels[i]:
errorcount += 1
c = errorcount / 100
return c
def predictperson():
level = ['not at all', 'in small does', 'in large does']
percenttats = float(input('percentage of time spent playing video games?'))
ffmiles = float(input('frequent flier miles earned per year?'))
icecream = float(input('liters of ice cream consumed per year?'))
data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')
normMat = KNN_3.autoNorm(data)
test_dataset = array([[percenttats, ffmiles, icecream]])
a = clf.classify0(test_dataset, data, 3, datalabels)
print(level[a[0] - 1])
predictperson()
<|reserved_special_token_1|>
from numpy import *
import KNN_1
import KNN_3
import KNN_suanfa as clf
def datingClassTest():
horatio = 0.1
data, datalabels = KNN_1.filel2matrix("datingTestSet2.txt")
normMat = KNN_3.autoNorm(data)
ml = normMat.shape[0]
numTestset = int(ml*horatio)
errorcount = 0
a=clf.classify0(normMat[0:numTestset,:],normMat[numTestset:ml,:],3,datalabels[numTestset:ml])
for i in range(len(a)):
if a[i] != datalabels[i]:
errorcount += 1
c = errorcount/100
return c
def predictperson():
level = ['not at all','in small does','in large does']
percenttats = float(input("percentage of time spent playing video games?"))
ffmiles = float(input("frequent flier miles earned per year?"))
icecream = float(input("liters of ice cream consumed per year?"))
data, datalabels = KNN_1.filel2matrix("datingTestSet2.txt")
normMat = KNN_3.autoNorm(data)
test_dataset = array([[percenttats,ffmiles,icecream]])
a = clf.classify0(test_dataset,data,3,datalabels)
print(level[a[0]-1])
predictperson()
|
flexible
|
{
"blob_id": "3086f62d4057812fc7fb4e21a18bc7d0ba786865",
"index": 2526,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml * horatio)\n errorcount = 0\n a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :], \n 3, datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount / 100\n return c\n\n\ndef predictperson():\n level = ['not at all', 'in small does', 'in large does']\n percenttats = float(input('percentage of time spent playing video games?'))\n ffmiles = float(input('frequent flier miles earned per year?'))\n icecream = float(input('liters of ice cream consumed per year?'))\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats, ffmiles, icecream]])\n a = clf.classify0(test_dataset, data, 3, datalabels)\n print(level[a[0] - 1])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml * horatio)\n errorcount = 0\n a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :], \n 3, datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount / 100\n return c\n\n\ndef predictperson():\n level = ['not at all', 'in small does', 'in large does']\n percenttats = float(input('percentage of time spent playing video games?'))\n ffmiles = float(input('frequent flier miles earned per year?'))\n icecream = float(input('liters of ice cream consumed per year?'))\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats, ffmiles, icecream]])\n a = clf.classify0(test_dataset, data, 3, datalabels)\n print(level[a[0] - 1])\n\n\npredictperson()\n",
"step-4": "from numpy import *\nimport KNN_1\nimport KNN_3\nimport KNN_suanfa as clf\n\n\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml * horatio)\n errorcount = 0\n a = clf.classify0(normMat[0:numTestset, :], normMat[numTestset:ml, :], \n 3, datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount / 100\n return c\n\n\ndef predictperson():\n level = ['not at all', 'in small does', 'in large does']\n percenttats = float(input('percentage of time spent playing video games?'))\n ffmiles = float(input('frequent flier miles earned per year?'))\n icecream = float(input('liters of ice cream consumed per year?'))\n data, datalabels = KNN_1.filel2matrix('datingTestSet2.txt')\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats, ffmiles, icecream]])\n a = clf.classify0(test_dataset, data, 3, datalabels)\n print(level[a[0] - 1])\n\n\npredictperson()\n",
"step-5": "from numpy import *\nimport KNN_1\nimport KNN_3\nimport KNN_suanfa as clf\ndef datingClassTest():\n horatio = 0.1\n data, datalabels = KNN_1.filel2matrix(\"datingTestSet2.txt\")\n normMat = KNN_3.autoNorm(data)\n ml = normMat.shape[0]\n numTestset = int(ml*horatio)\n errorcount = 0\n a=clf.classify0(normMat[0:numTestset,:],normMat[numTestset:ml,:],3,datalabels[numTestset:ml])\n for i in range(len(a)):\n if a[i] != datalabels[i]:\n errorcount += 1\n c = errorcount/100\n return c\n\ndef predictperson():\n level = ['not at all','in small does','in large does']\n percenttats = float(input(\"percentage of time spent playing video games?\"))\n ffmiles = float(input(\"frequent flier miles earned per year?\"))\n icecream = float(input(\"liters of ice cream consumed per year?\"))\n data, datalabels = KNN_1.filel2matrix(\"datingTestSet2.txt\")\n normMat = KNN_3.autoNorm(data)\n test_dataset = array([[percenttats,ffmiles,icecream]])\n a = clf.classify0(test_dataset,data,3,datalabels)\n print(level[a[0]-1])\npredictperson()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
"""These are views that are used for viewing and editing characters."""
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin,\
LoginRequiredMixin, PermissionRequiredMixin
from django.db import transaction
from django.db.models import F
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.urls import reverse, reverse_lazy
from django.views import View
from django.views.generic.edit import FormMixin, CreateView, UpdateView
from django.views.generic import DeleteView, DetailView, FormView, ListView
from rest_framework.status import HTTP_412_PRECONDITION_FAILED
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from rest_framework.views import APIView
from talesofvalor import get_query
from talesofvalor.events.models import Event
from talesofvalor.players.models import Registration
from talesofvalor.skills.models import Header, HeaderSkill
from .models import Character
from .forms import CharacterForm, CharacterSkillForm,\
CharacterConceptApproveForm, CharacterHistoryApproveForm
class OwnsCharacter(BasePermission):
"""
    The current user is staff or owns the character that is being manipulated.
"""
message = "You don't own this character"
    def has_object_permission(self, request, view, obj):
        # Staff with the global permission may act on any character;
        # otherwise the character must belong to the requesting user.
        if request.user.has_perm('players.view_any_player'):
            return True
        try:
            return obj.player.user == request.user
        except AttributeError:
            return False
class CharacterCreateView(LoginRequiredMixin, CreateView):
model = Character
form_class = CharacterForm
def get_initial(self):
# Get the initial dictionary from the superclass method
initial = super(CharacterCreateView, self).get_initial()
# Copy the dictionary so we don't accidentally change a mutable dict
initial = initial.copy()
# default to getting the player from the query String.
try:
initial['player'] = self.request.GET['player']
except KeyError:
initial['player'] = self.request.user.player
# etc...
return initial
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user # pass the 'user' in kwargs
return kwargs
def get_success_url(self):
return reverse(
'characters:character_skill_update',
kwargs={'pk': self.object.pk}
)
def form_valid(self, form):
"""
If this form is valid, then add the current player to the character
if the current user is not an admin
If the user doesn't have any other active characters, set this one
to active.
"""
if not self.request.user.has_perm('players.view_any_player'):
form.instance.player = self.request.user.player
if not form.instance.player.character_set.filter(active_flag=True).exists():
form.instance.active_flag = True
messages.info(self.request, 'New Character, "{}" created.'.format(
form.instance.name
))
return super().form_valid(form)
class CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Character
form_class = CharacterForm
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
return False
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user # pass the 'user' in kwargs
return kwargs
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
class CharacterDeleteView(
PermissionRequiredMixin,
UserPassesTestMixin,
DeleteView
):
"""
    Removes a character permanently.
Removing a character may have strange effects on other views.
"""
model = Character
permission_required = ('characters.change_character', )
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
return False
class CharacterResetView(
PermissionRequiredMixin,
UserPassesTestMixin,
View
):
"""
    Resets a character's skills to none and returns their points to them.
"""
model = Character
permission_required = ('characters.change_character', )
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
return False
def get(self, request, *args, **kwargs):
"""
        Send the user back to the originating page or back to the
        character whose skills were just reset.
"""
with transaction.atomic():
character = self.model.objects.get(pk=self.kwargs['pk'])
character.cp_available += character.cp_spent
character.cp_spent = 0
character.save(update_fields=['cp_available', 'cp_spent'])
character.characterskills_set.all().delete()
character.headers.clear()
messages.info(self.request, 'Character skills reset for {}.'.format(
character.name
))
return HttpResponseRedirect(
self.request.META.get(
'HTTP_REFERER',
reverse(
'characters:character_detail',
kwargs={'pk': self.kwargs['pk']}
)
)
)
class CharacterSetActiveView(
LoginRequiredMixin,
UserPassesTestMixin,
View
):
"""
    Set the active character for the character's player to the sent id.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
return False
def get(self, request, *args, **kwargs):
"""
        Send the user back to the originating page or back to the
character they are setting active
"""
character = self.model.objects.get(pk=self.kwargs['pk'])
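        # Only one character per player may be active at a time, so clear the
        # flag on all of the player's characters before marking this one.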
character.player.character_set.update(active_flag=False)
character.active_flag = True
character.save()
messages.info(self.request, 'Active Character changed to {}.'.format(
character.name
))
return HttpResponseRedirect(
self.request.META.get(
'HTTP_REFERER',
reverse(
'characters:character_detail',
kwargs={'pk': self.kwargs['pk']}
)
)
)
class CharacterSkillUpdateView(
LoginRequiredMixin,
UserPassesTestMixin,
FormMixin,
DetailView):
"""
Allow a user to update their chosen skills
"""
template_name = 'characters/character_skill_form.html'
form_class = CharacterSkillForm
model = Character
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
return False
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.skills = Header.objects\
.order_by('hidden_flag', 'category', 'name')\
.all()
kwargs.update({'skills': self.skills})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**self.kwargs)
# remove skills not in the hash.
available_skills = self.object.skillhash.keys()
        context['skills'] = filter(
            lambda x: x.id in available_skills
            or self.request.user.has_perm('players.view_any_player'),
            self.skills
        )
context['skill_hash'] = self.object.skillhash
# add the bare skills granted by the rules
context['granted_skills'] = self.object.skill_grants
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
"""
Form is valid. Save the skills to that character and remove the
        appropriate number of character points.
"""
return super().form_valid(form)
class ResetPointsView(
PermissionRequiredMixin,
View
):
"""
Resets the points for the season.
"""
permission_required = ('characters.reset_points', )
def get(self, request, *args, **kwargs):
"""
        Send the user back to the originating page or back to the main
page if the referrer isn't set.
"""
Character.objects.all().update(cp_transferred=0)
messages.info(self.request, 'Point cap reset!')
return HttpResponseRedirect(
self.request.META.get(
'HTTP_REFERER',
'/'
)
)
'''
Put the AJAX work for Characters here
'''
class CharacterAddHeaderView(APIView):
'''
    Set of AJAX views for a Character.
This handles different API calls for character actions.
'''
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
# get the character and then see if the header is allowed
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
# Default to error.
content = {
'error': "prerequisites not met"
}
status = None
# if the prerequisites are met, add the header to the user and return
# the list of skills
if character.check_header_prerequisites(header):
# see if the character has enough points to add the header
if (cp_available - header.cost) >= 0:
character.cp_available -= header.cost
character.cp_spent += header.cost
character.headers.add(header)
character.save()
skill_item_template_string = render_to_string(
"characters/includes/character_skill_update_item.html",
{
'header': header,
'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]
},
request
)
content = {
'success': header.cost * -1,
'skills': skill_item_template_string
}
else:
content = {
'error': "You don't have enough points available for this character to add this header."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDropHeaderView(APIView):
'''
    Set of AJAX views for a Character.
This handles different API calls for character actions.
'''
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
# get the character and header
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
# Default to error.
content = {
'error': "Header is not already bought!"
}
status = None
# if the character has the header, drop it and refund the CP
content['header_list'] = []
if header in character.headers.all():
print(f'Header present! Dropping and adding back in {header.cost} CP...')
character.cp_available += header.cost
character.cp_spent -= header.cost
character.headers.remove(header)
skill_item_template_string = render_to_string(
"characters/includes/character_skill_update_item.html",
{
'header': header,
'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]
},
request
)
content = {
'success': header.cost,
}
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterAddSkillView(APIView):
'''
    Set of AJAX views for a Character.
This handles different API calls for character actions.
'''
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
skill_id = int(request.POST.get('skill_id', 0))
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
        try:
            vector = int(request.POST.get('vector'))
        except (TypeError, ValueError):
            # 'vector' was missing or not an integer, so there is no change to apply.
            return Response(
                {'error': "No change indicated"},
                HTTP_412_PRECONDITION_FAILED
            )
# get the character and then see if the skill is allowed
header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id=header_id)
character = Character.objects.get(pk=character_id)
# check that the skill is allowed.
# if the prerequisites are met, add the header to the user and return
# the list of skills
# otherwise, return an error
content = {
'success': "testing right now"
}
status = None
if character.check_skill_prerequisites(header_skill.skill, header_skill.header):
# since vector is the direction, we want to reverse it when
# dealing with what we want to change for the available points
# see if the character has enough points to add the header
cost = character.skill_cost(header_skill) * vector
if (cp_available - cost) >= 0:
# when this is returned, change the available costs
(character_skill, created) = character.characterskills_set.get_or_create(
skill=header_skill
)
if character_skill.count and (character_skill.count + vector < 0):
content = {
'error': f"You don't have any points in {header_skill.skill}"
}
status = HTTP_412_PRECONDITION_FAILED
else:
content = {
'success': cost * -1
}
character_skill.count = F('count') + vector
character_skill.save()
character.cp_spent = F('cp_spent') + cost
character.cp_available = F('cp_available') - cost
character.save()
else:
content = {
'error': "You don't have enough points available to purchase this skill . . ."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Show the details for a character.
From here you can edit the details of a character or choose skills.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
"""
Approve the concept for a character.
Grant the CP for the character
    Set the concept approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterConceptApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.concept_approved_flag = True
self.object.save(update_fields=['concept_approved_flag'])
messages.info(self.request, f"{self.object} concept approved!")
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
))
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
"""
Approve the history for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterHistoryApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.history_approved_flag = True
self.object.save(update_fields=['history_approved_flag'])
messages.info(self.request, f"{self.object} history approved!")
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
))
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
class CharacterListView(LoginRequiredMixin, ListView):
"""
Show the list of characters.
From here, you can view, edit, delete a character.
"""
model = Character
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
criteria = self.request.GET.get('criteria', '')
if (criteria.strip()):
entry_query = get_query(
criteria,
['name', 'description', 'concept', 'history', 'player_notes']
)
queryset = queryset.filter(entry_query)
history_approved_flag = self.request.GET.get('history_approved_flag', False)
if history_approved_flag:
queryset = queryset.filter(history_approved_flag=True)
concept_approved_flag = self.request.GET.get('concept_approved_flag', False)
if concept_approved_flag:
queryset = queryset.filter(concept_approved_flag=True)
return queryset
def get_context_data(self, **kwargs):
'''
Add the form so we can filter the characters.
'''
# get the context data to add to.
context_data = super().get_context_data(**kwargs)
context_data.update(**self.request.GET)
# return the resulting context
return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
"""
Show a list of characters to print.
"""
model = Character
template_name = "characters/character_print_list.html"
def get_queryset(self):
queryset = super().get_queryset() # filter by event
event_id = self.kwargs.get('event_id', None)
if not event_id:
event_id = Event.next_event().id
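        # Limit the printout to the active, non-NPC characters of players
        # registered for the chosen (or next upcoming) event.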
player_ids = Registration.objects.filter(event__id=event_id).values_list('player_id', flat=True)
queryset = queryset.filter(player__id__in=player_ids, npc_flag=False, active_flag=True)
return queryset
|
normal
|
{
"blob_id": "55ea522b096b189ff67b0da0058af777b0a910e3",
"index": 4970,
"step-1": "<mask token>\n\n\nclass CharacterDropHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'Header is not already bought!'}\n status = None\n content['header_list'] = []\n if header in character.headers.all():\n print(\n f'Header present! Dropping and adding back in {header.cost} CP...'\n )\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html', {\n 'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost}\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {'error': 'No change indicated'}\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id\n =header_id)\n character = Character.objects.get(pk=character_id)\n content = {'success': 'testing right now'}\n status = None\n if character.check_skill_prerequisites(header_skill.skill,\n header_skill.header):\n cost = character.skill_cost(header_skill) * vector\n if cp_available - cost >= 0:\n character_skill, created = (character.characterskills_set.\n get_or_create(skill=header_skill))\n if (character_skill.count and character_skill.count +\n vector < 0):\n content = {'error':\n f\"You don't have any points in {header_skill.skill}\"}\n status = HTTP_412_PRECONDITION_FAILED\n else:\n content = {'success': cost * -1}\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = F('cp_available') - cost\n character.save()\n else:\n content = {'error':\n \"You don't have enough points available to purchase this skill . . 
.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f'{self.object} concept approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f'{self.object} history approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if criteria.strip():\n entry_query = get_query(criteria, ['name', 'description',\n 'concept', 'history', 'player_notes'])\n queryset = queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag',\n False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n 
concept_approved_flag = self.request.GET.get('concept_approved_flag',\n False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the form so we can filter the characters.\n \"\"\"\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n model = Character\n template_name = 'characters/character_print_list.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id\n ).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=\n False, active_flag=True)\n return queryset\n",
"step-2": "<mask token>\n\n\nclass CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,\n FormMixin, DetailView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'\n ).all()\n kwargs.update({'skills': self.skills})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**self.kwargs)\n available_skills = self.object.skillhash.keys()\n context['skills'] = filter(lambda x: x.id in available_skills or\n self.request.user.has_perm('player.view_any_player'), self.skills)\n context['skill_hash'] = self.object.skillhash\n context['granted_skills'] = self.object.skill_grants\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n \"\"\"\n Form is valid. Save the skills to that character and remove the\n appropriate number of characters points.\n \"\"\"\n return super().form_valid(form)\n\n\nclass ResetPointsView(PermissionRequiredMixin, View):\n \"\"\"\n Resets the points for the season.\n \"\"\"\n permission_required = 'characters.reset_points',\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the main \n page if the referrer isn't set.\n \"\"\"\n Character.objects.all().update(cp_transferred=0)\n messages.info(self.request, 'Point cap reset!')\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))\n\n\n<mask token>\n\n\nclass CharacterAddHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'prerequisites not met'}\n status = None\n if character.check_header_prerequisites(header):\n if cp_available - header.cost >= 0:\n character.cp_available -= header.cost\n character.cp_spent += header.cost\n character.headers.add(header)\n character.save()\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html',\n {'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost * -1, 'skills':\n skill_item_template_string}\n else:\n content = {'error':\n \"You don't have enough points available for this character to add this header.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDropHeaderView(APIView):\n \"\"\"\n Set of AJAX 
views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'Header is not already bought!'}\n status = None\n content['header_list'] = []\n if header in character.headers.all():\n print(\n f'Header present! Dropping and adding back in {header.cost} CP...'\n )\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html', {\n 'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost}\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {'error': 'No change indicated'}\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id\n =header_id)\n character = Character.objects.get(pk=character_id)\n content = {'success': 'testing right now'}\n status = None\n if character.check_skill_prerequisites(header_skill.skill,\n header_skill.header):\n cost = character.skill_cost(header_skill) * vector\n if cp_available - cost >= 0:\n character_skill, created = (character.characterskills_set.\n get_or_create(skill=header_skill))\n if (character_skill.count and character_skill.count +\n vector < 0):\n content = {'error':\n f\"You don't have any points in {header_skill.skill}\"}\n status = HTTP_412_PRECONDITION_FAILED\n else:\n content = {'success': cost * -1}\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = F('cp_available') - cost\n character.save()\n else:\n content = {'error':\n \"You don't have enough points available to purchase this skill . . 
.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f'{self.object} concept approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f'{self.object} history approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if criteria.strip():\n entry_query = get_query(criteria, ['name', 'description',\n 'concept', 'history', 'player_notes'])\n queryset = queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag',\n False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n 
concept_approved_flag = self.request.GET.get('concept_approved_flag',\n False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the form so we can filter the characters.\n \"\"\"\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n model = Character\n template_name = 'characters/character_print_list.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id\n ).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=\n False, active_flag=True)\n return queryset\n",
"step-3": "<mask token>\n\n\nclass CharacterResetView(PermissionRequiredMixin, UserPassesTestMixin, View):\n <mask token>\n model = Character\n permission_required = 'characters.change_character',\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n with transaction.atomic():\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.cp_available += character.cp_spent\n character.cp_spent = 0\n character.save(update_fields=['cp_available', 'cp_spent'])\n character.characterskills_set.all().delete()\n character.headers.clear()\n messages.info(self.request, 'Character skills reset for {}.'.format\n (character.name))\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',\n reverse('characters:character_detail', kwargs={'pk': self.\n kwargs['pk']})))\n\n\nclass CharacterSetActiveView(LoginRequiredMixin, UserPassesTestMixin, View):\n \"\"\"\n Set the active character for the characters player to the sent id.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.player.character_set.update(active_flag=False)\n character.active_flag = True\n character.save()\n messages.info(self.request, 'Active Character changed to {}.'.\n format(character.name))\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',\n reverse('characters:character_detail', kwargs={'pk': self.\n kwargs['pk']})))\n\n\nclass CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,\n FormMixin, DetailView):\n \"\"\"\n Allow a user to update their chosen skills\n \"\"\"\n template_name = 'characters/character_skill_form.html'\n form_class = CharacterSkillForm\n model = Character\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'\n ).all()\n kwargs.update({'skills': self.skills})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**self.kwargs)\n available_skills = self.object.skillhash.keys()\n context['skills'] = filter(lambda x: x.id in available_skills or\n self.request.user.has_perm('player.view_any_player'), self.skills)\n context['skill_hash'] = self.object.skillhash\n context['granted_skills'] = self.object.skill_grants\n return context\n\n 
def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n \"\"\"\n Form is valid. Save the skills to that character and remove the\n appropriate number of characters points.\n \"\"\"\n return super().form_valid(form)\n\n\nclass ResetPointsView(PermissionRequiredMixin, View):\n \"\"\"\n Resets the points for the season.\n \"\"\"\n permission_required = 'characters.reset_points',\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the main \n page if the referrer isn't set.\n \"\"\"\n Character.objects.all().update(cp_transferred=0)\n messages.info(self.request, 'Point cap reset!')\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))\n\n\n<mask token>\n\n\nclass CharacterAddHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'prerequisites not met'}\n status = None\n if character.check_header_prerequisites(header):\n if cp_available - header.cost >= 0:\n character.cp_available -= header.cost\n character.cp_spent += header.cost\n character.headers.add(header)\n character.save()\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html',\n {'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost * -1, 'skills':\n skill_item_template_string}\n else:\n content = {'error':\n \"You don't have enough points available for this character to add this header.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDropHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'Header is not already bought!'}\n status = None\n content['header_list'] = []\n if header in character.headers.all():\n print(\n f'Header present! 
Dropping and adding back in {header.cost} CP...'\n )\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html', {\n 'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost}\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {'error': 'No change indicated'}\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id\n =header_id)\n character = Character.objects.get(pk=character_id)\n content = {'success': 'testing right now'}\n status = None\n if character.check_skill_prerequisites(header_skill.skill,\n header_skill.header):\n cost = character.skill_cost(header_skill) * vector\n if cp_available - cost >= 0:\n character_skill, created = (character.characterskills_set.\n get_or_create(skill=header_skill))\n if (character_skill.count and character_skill.count +\n vector < 0):\n content = {'error':\n f\"You don't have any points in {header_skill.skill}\"}\n status = HTTP_412_PRECONDITION_FAILED\n else:\n content = {'success': cost * -1}\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = F('cp_available') - cost\n character.save()\n else:\n content = {'error':\n \"You don't have enough points available to purchase this skill . . 
.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f'{self.object} concept approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f'{self.object} history approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if criteria.strip():\n entry_query = get_query(criteria, ['name', 'description',\n 'concept', 'history', 'player_notes'])\n queryset = queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag',\n False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n 
concept_approved_flag = self.request.GET.get('concept_approved_flag',\n False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the form so we can filter the characters.\n \"\"\"\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n model = Character\n template_name = 'characters/character_print_list.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id\n ).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=\n False, active_flag=True)\n return queryset\n",
"step-4": "<mask token>\n\n\nclass CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n <mask token>\n <mask token>\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterDeleteView(PermissionRequiredMixin, UserPassesTestMixin,\n DeleteView):\n \"\"\"\n Removes a character permanantly.\n\n Removing a character may have strange effects on other views.\n \"\"\"\n model = Character\n permission_required = 'characters.change_character',\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterResetView(PermissionRequiredMixin, UserPassesTestMixin, View):\n \"\"\"\n Resets a characters skills to none and returns their points to them.\n \"\"\"\n model = Character\n permission_required = 'characters.change_character',\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n with transaction.atomic():\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.cp_available += character.cp_spent\n character.cp_spent = 0\n character.save(update_fields=['cp_available', 'cp_spent'])\n character.characterskills_set.all().delete()\n character.headers.clear()\n messages.info(self.request, 'Character skills reset for {}.'.format\n (character.name))\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',\n reverse('characters:character_detail', kwargs={'pk': self.\n kwargs['pk']})))\n\n\nclass CharacterSetActiveView(LoginRequiredMixin, UserPassesTestMixin, View):\n \"\"\"\n Set the active character for the characters player to the sent id.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.player.character_set.update(active_flag=False)\n character.active_flag = True\n character.save()\n messages.info(self.request, 'Active Character changed to {}.'.\n format(character.name))\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',\n reverse('characters:character_detail', 
kwargs={'pk': self.\n kwargs['pk']})))\n\n\nclass CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,\n FormMixin, DetailView):\n \"\"\"\n Allow a user to update their chosen skills\n \"\"\"\n template_name = 'characters/character_skill_form.html'\n form_class = CharacterSkillForm\n model = Character\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'\n ).all()\n kwargs.update({'skills': self.skills})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**self.kwargs)\n available_skills = self.object.skillhash.keys()\n context['skills'] = filter(lambda x: x.id in available_skills or\n self.request.user.has_perm('player.view_any_player'), self.skills)\n context['skill_hash'] = self.object.skillhash\n context['granted_skills'] = self.object.skill_grants\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n \"\"\"\n Form is valid. Save the skills to that character and remove the\n appropriate number of characters points.\n \"\"\"\n return super().form_valid(form)\n\n\nclass ResetPointsView(PermissionRequiredMixin, View):\n \"\"\"\n Resets the points for the season.\n \"\"\"\n permission_required = 'characters.reset_points',\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the main \n page if the referrer isn't set.\n \"\"\"\n Character.objects.all().update(cp_transferred=0)\n messages.info(self.request, 'Point cap reset!')\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))\n\n\n<mask token>\n\n\nclass CharacterAddHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'prerequisites not met'}\n status = None\n if character.check_header_prerequisites(header):\n if cp_available - header.cost >= 0:\n character.cp_available -= header.cost\n character.cp_spent += header.cost\n character.headers.add(header)\n character.save()\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html',\n {'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost * -1, 'skills':\n skill_item_template_string}\n else:\n content = {'error':\n \"You don't have enough points available for this character to add this header.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = 
HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDropHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'Header is not already bought!'}\n status = None\n content['header_list'] = []\n if header in character.headers.all():\n print(\n f'Header present! Dropping and adding back in {header.cost} CP...'\n )\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html', {\n 'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost}\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {'error': 'No change indicated'}\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id\n =header_id)\n character = Character.objects.get(pk=character_id)\n content = {'success': 'testing right now'}\n status = None\n if character.check_skill_prerequisites(header_skill.skill,\n header_skill.header):\n cost = character.skill_cost(header_skill) * vector\n if cp_available - cost >= 0:\n character_skill, created = (character.characterskills_set.\n get_or_create(skill=header_skill))\n if (character_skill.count and character_skill.count +\n vector < 0):\n content = {'error':\n f\"You don't have any points in {header_skill.skill}\"}\n status = HTTP_412_PRECONDITION_FAILED\n else:\n content = {'success': cost * -1}\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = F('cp_available') - cost\n character.save()\n else:\n content = {'error':\n \"You don't have enough points available to purchase this skill . . 
.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f'{self.object} concept approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f'{self.object} history approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if criteria.strip():\n entry_query = get_query(criteria, ['name', 'description',\n 'concept', 'history', 'player_notes'])\n queryset = queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag',\n False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n 
concept_approved_flag = self.request.GET.get('concept_approved_flag',\n False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the form so we can filter the characters.\n \"\"\"\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n model = Character\n template_name = 'characters/character_print_list.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id\n ).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=\n False, active_flag=True)\n return queryset\n",
"step-5": "\"\"\"These are views that are used for viewing and editing characters.\"\"\"\n\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin,\\\n LoginRequiredMixin, PermissionRequiredMixin\nfrom django.db import transaction\nfrom django.db.models import F\nfrom django.http import HttpResponseRedirect\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse, reverse_lazy\nfrom django.views import View\nfrom django.views.generic.edit import FormMixin, CreateView, UpdateView\nfrom django.views.generic import DeleteView, DetailView, FormView, ListView\n\nfrom rest_framework.status import HTTP_412_PRECONDITION_FAILED\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nfrom talesofvalor import get_query\nfrom talesofvalor.events.models import Event\nfrom talesofvalor.players.models import Registration\nfrom talesofvalor.skills.models import Header, HeaderSkill\n\nfrom .models import Character\nfrom .forms import CharacterForm, CharacterSkillForm,\\\n CharacterConceptApproveForm, CharacterHistoryApproveForm\n\n\nclass OwnsCharacter(BasePermission):\n \"\"\"\n The current user is staff or owns the that is being manipulated.\n \"\"\"\n message = \"You don't own this character\"\n\n def has_object_permission(self, request, view, obj):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterCreateView(LoginRequiredMixin, CreateView):\n model = Character\n form_class = CharacterForm\n\n def get_initial(self):\n # Get the initial dictionary from the superclass method\n initial = super(CharacterCreateView, self).get_initial()\n # Copy the dictionary so we don't accidentally change a mutable dict\n initial = initial.copy()\n # default to getting the player from the query String.\n try:\n initial['player'] = self.request.GET['player']\n except KeyError:\n initial['player'] = self.request.user.player\n # etc...\n return initial\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user # pass the 'user' in kwargs\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'characters:character_skill_update',\n kwargs={'pk': self.object.pk}\n )\n\n def form_valid(self, form):\n \"\"\"\n If this form is valid, then add the current player to the character\n if the current user is not an admin\n\n If the user doesn't have any other active characters, set this one\n to active.\n \"\"\"\n if not self.request.user.has_perm('players.view_any_player'):\n form.instance.player = self.request.user.player\n\n if not form.instance.player.character_set.filter(active_flag=True).exists():\n form.instance.active_flag = True\n\n messages.info(self.request, 'New Character, \"{}\" created.'.format(\n form.instance.name\n ))\n return super().form_valid(form)\n\n\nclass CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Character\n form_class = CharacterForm\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n 
return False\n return False\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user # pass the 'user' in kwargs\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n )\n\n\nclass CharacterDeleteView(\n PermissionRequiredMixin,\n UserPassesTestMixin,\n DeleteView\n ):\n \"\"\"\n Removes a character permanantly.\n\n Removing a character may have strange effects on other views.\n \"\"\"\n\n model = Character\n permission_required = ('characters.change_character', )\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterResetView(\n PermissionRequiredMixin,\n UserPassesTestMixin,\n View\n ):\n \"\"\"\n Resets a characters skills to none and returns their points to them.\n \"\"\"\n\n model = Character\n permission_required = ('characters.change_character', )\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n\n with transaction.atomic():\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.cp_available += character.cp_spent\n character.cp_spent = 0\n character.save(update_fields=['cp_available', 'cp_spent'])\n character.characterskills_set.all().delete()\n character.headers.clear()\n messages.info(self.request, 'Character skills reset for {}.'.format(\n character.name\n ))\n return HttpResponseRedirect(\n self.request.META.get(\n 'HTTP_REFERER',\n reverse(\n 'characters:character_detail',\n kwargs={'pk': self.kwargs['pk']}\n )\n )\n )\n\n\nclass CharacterSetActiveView(\n LoginRequiredMixin,\n UserPassesTestMixin,\n View\n ):\n \"\"\"\n Set the active character for the characters player to the sent id.\n \"\"\"\n\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.player.character_set.update(active_flag=False)\n character.active_flag = True\n character.save()\n messages.info(self.request, 'Active Character changed to {}.'.format(\n character.name\n ))\n return HttpResponseRedirect(\n self.request.META.get(\n 'HTTP_REFERER',\n reverse(\n 'characters:character_detail',\n kwargs={'pk': self.kwargs['pk']}\n )\n )\n )\n\n\nclass CharacterSkillUpdateView(\n LoginRequiredMixin,\n UserPassesTestMixin,\n FormMixin,\n DetailView):\n \"\"\"\n Allow a user to update their chosen skills\n \"\"\"\n\n template_name = 
'characters/character_skill_form.html'\n form_class = CharacterSkillForm\n model = Character\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.skills = Header.objects\\\n .order_by('hidden_flag', 'category', 'name')\\\n .all()\n kwargs.update({'skills': self.skills})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**self.kwargs)\n\n # remove skills not in the hash.\n available_skills = self.object.skillhash.keys()\n context['skills'] = filter(lambda x: x.id in available_skills or self.request.user.has_perm('player.view_any_player'), self.skills)\n context['skill_hash'] = self.object.skillhash\n # add the bare skills granted by the rules\n context['granted_skills'] = self.object.skill_grants\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n \"\"\"\n Form is valid. Save the skills to that character and remove the\n appropriate number of characters points.\n \"\"\"\n return super().form_valid(form)\n\n\nclass ResetPointsView(\n PermissionRequiredMixin,\n View\n ):\n \"\"\"\n Resets the points for the season.\n \"\"\"\n\n permission_required = ('characters.reset_points', )\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the main \n page if the referrer isn't set.\n \"\"\"\n Character.objects.all().update(cp_transferred=0)\n messages.info(self.request, 'Point cap reset!')\n return HttpResponseRedirect(\n self.request.META.get(\n 'HTTP_REFERER',\n '/'\n )\n )\n\n\n'''\nPut the AJAX work for Characters here\n'''\n\n\nclass CharacterAddHeaderView(APIView):\n '''\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n '''\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n # get the character and then see if the header is allowed\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n # Default to error.\n content = {\n 'error': \"prerequisites not met\"\n }\n status = None\n # if the prerequisites are met, add the header to the user and return\n # the list of skills\n if character.check_header_prerequisites(header):\n # see if the character has enough points to add the header\n if (cp_available - header.cost) >= 0:\n character.cp_available -= header.cost\n character.cp_spent += header.cost\n character.headers.add(header)\n character.save()\n skill_item_template_string = render_to_string(\n \"characters/includes/character_skill_update_item.html\",\n {\n 'header': header,\n 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]\n },\n request\n )\n content = {\n 'success': header.cost * -1,\n 'skills': 
skill_item_template_string\n }\n else: \n content = {\n 'error': \"You don't have enough points available for this character to add this header.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDropHeaderView(APIView):\n '''\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n '''\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n # get the character and header\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n # Default to error.\n content = {\n 'error': \"Header is not already bought!\"\n }\n status = None\n # if the character has the header, drop it and refund the CP\n content['header_list'] = []\n\n if header in character.headers.all():\n print(f'Header present! Dropping and adding back in {header.cost} CP...')\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n \"characters/includes/character_skill_update_item.html\",\n {\n 'header': header,\n 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]\n },\n request\n )\n content = {\n 'success': header.cost,\n }\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n '''\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n '''\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {\n 'error': \"No change indicated\"\n }\n # get the character and then see if the skill is allowed\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id=header_id)\n character = Character.objects.get(pk=character_id)\n # check that the skill is allowed.\n # if the prerequisites are met, add the header to the user and return\n # the list of skills\n # otherwise, return an error\n content = {\n 'success': \"testing right now\"\n }\n status = None\n if character.check_skill_prerequisites(header_skill.skill, header_skill.header):\n # since vector is the direction, we want to reverse it when\n # dealing with what we want to change for the available points\n # see if the character has enough points to add the header\n cost = character.skill_cost(header_skill) * vector\n if (cp_available - cost) >= 0:\n # when this is returned, change the available costs\n (character_skill, created) = character.characterskills_set.get_or_create(\n skill=header_skill\n )\n if character_skill.count and (character_skill.count + vector < 0):\n content = {\n 'error': f\"You don't have any points in {header_skill.skill}\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else: \n content = {\n 'success': cost * -1\n }\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = 
F('cp_available') - cost\n character.save()\n else: \n content = {\n 'error': \"You don't have enough points available to purchase this skill . . .\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f\"{self.object} concept approved!\")\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ))\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ) \n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f\"{self.object} history approved!\")\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ))\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ) \n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if (criteria.strip()):\n entry_query = get_query(\n criteria,\n ['name', 'description', 'concept', 'history', 'player_notes']\n )\n queryset = 
queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag', False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n concept_approved_flag = self.request.GET.get('concept_approved_flag', False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n '''\n Add the form so we can filter the characters.\n '''\n # get the context data to add to.\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n # return the resulting context\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n\n model = Character\n template_name = \"characters/character_print_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset() # filter by event\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=False, active_flag=True)\n \n return queryset\n",
"step-ids": [
33,
48,
59,
68,
81
]
}
|
[
33,
48,
59,
68,
81
] |
<|reserved_special_token_0|>
class TestTNSWatcher:
<|reserved_special_token_0|>
@pytest.mark.xfail(raises=pandas.errors.ParserError)
def test_tns_watcher(self):
log('Connecting to DB')
mongo = Mongo(host=config['database']['host'], port=config[
'database']['port'], replica_set=config['database'][
'replica_set'], username=config['database']['username'],
password=config['database']['password'], db=config['database'][
'db'], verbose=True)
log('Successfully connected')
collection = config['database']['collections']['tns']
log('Grabbing most recent object from the TNS and ingesting that into the database'
)
get_tns(grab_all=False, test=True)
log('Done')
fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))
assert len(fetched_entries) > 0
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTNSWatcher:
"""
Test TNS monitoring
"""
@pytest.mark.xfail(raises=pandas.errors.ParserError)
def test_tns_watcher(self):
log('Connecting to DB')
mongo = Mongo(host=config['database']['host'], port=config[
'database']['port'], replica_set=config['database'][
'replica_set'], username=config['database']['username'],
password=config['database']['password'], db=config['database'][
'db'], verbose=True)
log('Successfully connected')
collection = config['database']['collections']['tns']
log('Grabbing most recent object from the TNS and ingesting that into the database'
)
get_tns(grab_all=False, test=True)
log('Done')
fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))
assert len(fetched_entries) > 0
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = load_config(config_file='config.yaml')['kowalski']
class TestTNSWatcher:
"""
Test TNS monitoring
"""
@pytest.mark.xfail(raises=pandas.errors.ParserError)
def test_tns_watcher(self):
log('Connecting to DB')
mongo = Mongo(host=config['database']['host'], port=config[
'database']['port'], replica_set=config['database'][
'replica_set'], username=config['database']['username'],
password=config['database']['password'], db=config['database'][
'db'], verbose=True)
log('Successfully connected')
collection = config['database']['collections']['tns']
log('Grabbing most recent object from the TNS and ingesting that into the database'
)
get_tns(grab_all=False, test=True)
log('Done')
fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))
assert len(fetched_entries) > 0
<|reserved_special_token_1|>
import pandas
import pytest
from tns_watcher import get_tns
from utils import load_config, log, Mongo
<|reserved_special_token_0|>
config = load_config(config_file='config.yaml')['kowalski']
class TestTNSWatcher:
"""
Test TNS monitoring
"""
@pytest.mark.xfail(raises=pandas.errors.ParserError)
def test_tns_watcher(self):
log('Connecting to DB')
mongo = Mongo(host=config['database']['host'], port=config[
'database']['port'], replica_set=config['database'][
'replica_set'], username=config['database']['username'],
password=config['database']['password'], db=config['database'][
'db'], verbose=True)
log('Successfully connected')
collection = config['database']['collections']['tns']
log('Grabbing most recent object from the TNS and ingesting that into the database'
)
get_tns(grab_all=False, test=True)
log('Done')
fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))
assert len(fetched_entries) > 0
<|reserved_special_token_1|>
import pandas
import pytest
from tns_watcher import get_tns
from utils import load_config, log, Mongo
""" load config and secrets """
config = load_config(config_file="config.yaml")["kowalski"]
class TestTNSWatcher:
"""
Test TNS monitoring
"""
@pytest.mark.xfail(raises=pandas.errors.ParserError)
def test_tns_watcher(self):
log("Connecting to DB")
mongo = Mongo(
host=config["database"]["host"],
port=config["database"]["port"],
replica_set=config["database"]["replica_set"],
username=config["database"]["username"],
password=config["database"]["password"],
db=config["database"]["db"],
verbose=True,
)
log("Successfully connected")
collection = config["database"]["collections"]["tns"]
log(
"Grabbing most recent object from the TNS and ingesting that into the database"
)
get_tns(
grab_all=False,
test=True,
)
log("Done")
fetched_entries = list(mongo.db[collection].find({}, {"_id": 1}))
assert len(fetched_entries) > 0
|
flexible
|
{
"blob_id": "e7ffa852d16e8e55b4e2b6ab2383561fe359a169",
"index": 1778,
"step-1": "<mask token>\n\n\nclass TestTNSWatcher:\n <mask token>\n\n @pytest.mark.xfail(raises=pandas.errors.ParserError)\n def test_tns_watcher(self):\n log('Connecting to DB')\n mongo = Mongo(host=config['database']['host'], port=config[\n 'database']['port'], replica_set=config['database'][\n 'replica_set'], username=config['database']['username'],\n password=config['database']['password'], db=config['database'][\n 'db'], verbose=True)\n log('Successfully connected')\n collection = config['database']['collections']['tns']\n log('Grabbing most recent object from the TNS and ingesting that into the database'\n )\n get_tns(grab_all=False, test=True)\n log('Done')\n fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))\n assert len(fetched_entries) > 0\n",
"step-2": "<mask token>\n\n\nclass TestTNSWatcher:\n \"\"\"\n Test TNS monitoring\n \"\"\"\n\n @pytest.mark.xfail(raises=pandas.errors.ParserError)\n def test_tns_watcher(self):\n log('Connecting to DB')\n mongo = Mongo(host=config['database']['host'], port=config[\n 'database']['port'], replica_set=config['database'][\n 'replica_set'], username=config['database']['username'],\n password=config['database']['password'], db=config['database'][\n 'db'], verbose=True)\n log('Successfully connected')\n collection = config['database']['collections']['tns']\n log('Grabbing most recent object from the TNS and ingesting that into the database'\n )\n get_tns(grab_all=False, test=True)\n log('Done')\n fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))\n assert len(fetched_entries) > 0\n",
"step-3": "<mask token>\nconfig = load_config(config_file='config.yaml')['kowalski']\n\n\nclass TestTNSWatcher:\n \"\"\"\n Test TNS monitoring\n \"\"\"\n\n @pytest.mark.xfail(raises=pandas.errors.ParserError)\n def test_tns_watcher(self):\n log('Connecting to DB')\n mongo = Mongo(host=config['database']['host'], port=config[\n 'database']['port'], replica_set=config['database'][\n 'replica_set'], username=config['database']['username'],\n password=config['database']['password'], db=config['database'][\n 'db'], verbose=True)\n log('Successfully connected')\n collection = config['database']['collections']['tns']\n log('Grabbing most recent object from the TNS and ingesting that into the database'\n )\n get_tns(grab_all=False, test=True)\n log('Done')\n fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))\n assert len(fetched_entries) > 0\n",
"step-4": "import pandas\nimport pytest\nfrom tns_watcher import get_tns\nfrom utils import load_config, log, Mongo\n<mask token>\nconfig = load_config(config_file='config.yaml')['kowalski']\n\n\nclass TestTNSWatcher:\n \"\"\"\n Test TNS monitoring\n \"\"\"\n\n @pytest.mark.xfail(raises=pandas.errors.ParserError)\n def test_tns_watcher(self):\n log('Connecting to DB')\n mongo = Mongo(host=config['database']['host'], port=config[\n 'database']['port'], replica_set=config['database'][\n 'replica_set'], username=config['database']['username'],\n password=config['database']['password'], db=config['database'][\n 'db'], verbose=True)\n log('Successfully connected')\n collection = config['database']['collections']['tns']\n log('Grabbing most recent object from the TNS and ingesting that into the database'\n )\n get_tns(grab_all=False, test=True)\n log('Done')\n fetched_entries = list(mongo.db[collection].find({}, {'_id': 1}))\n assert len(fetched_entries) > 0\n",
"step-5": "import pandas\nimport pytest\n\nfrom tns_watcher import get_tns\nfrom utils import load_config, log, Mongo\n\n\n\"\"\" load config and secrets \"\"\"\nconfig = load_config(config_file=\"config.yaml\")[\"kowalski\"]\n\n\nclass TestTNSWatcher:\n \"\"\"\n Test TNS monitoring\n \"\"\"\n\n @pytest.mark.xfail(raises=pandas.errors.ParserError)\n def test_tns_watcher(self):\n log(\"Connecting to DB\")\n mongo = Mongo(\n host=config[\"database\"][\"host\"],\n port=config[\"database\"][\"port\"],\n replica_set=config[\"database\"][\"replica_set\"],\n username=config[\"database\"][\"username\"],\n password=config[\"database\"][\"password\"],\n db=config[\"database\"][\"db\"],\n verbose=True,\n )\n log(\"Successfully connected\")\n\n collection = config[\"database\"][\"collections\"][\"tns\"]\n\n log(\n \"Grabbing most recent object from the TNS and ingesting that into the database\"\n )\n get_tns(\n grab_all=False,\n test=True,\n )\n log(\"Done\")\n\n fetched_entries = list(mongo.db[collection].find({}, {\"_id\": 1}))\n\n assert len(fetched_entries) > 0\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
options.register('file', '', VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string, 'File path for storing output')
options.parseArguments()
<|reserved_special_token_0|>
process.load('FWCore.MessageService.MessageLogger_cfi')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
options = VarParsing.VarParsing()
options.register('file', '', VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string, 'File path for storing output')
options.parseArguments()
file_path = options.file
process = cms.Process('RawAnalyzer')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))
process.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
))
process.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool
(False), outputFile=cms.untracked.string(file_path), badevlist=cms.
vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,
150641153, 151460577, 152364043, 152889525, 153151669, 151148928,
153471157, 149944833, 151407329, 152529024, 150403585, 151124352,
152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313,
152910005, 153348277, 154002162, 149846529, 150489601, 150526465,
151370465, 152959157, 153262261, 153916146, 150202881, 152750261,
153004213), modval=cms.untracked.int32(112))
process.TFileService = cms.Service('TFileService', fileName=cms.string(
'RawAnalyzer.root'))
process.MessageLogger.cerr.FwkReport.reportEvery = 2000
process.p = cms.Path(process.analyzer)
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register('file', '', VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string, 'File path for storing output')
options.parseArguments()
file_path = options.file
process = cms.Process('RawAnalyzer')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))
process.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
))
process.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool
(False), outputFile=cms.untracked.string(file_path), badevlist=cms.
vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,
150641153, 151460577, 152364043, 152889525, 153151669, 151148928,
153471157, 149944833, 151407329, 152529024, 150403585, 151124352,
152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313,
152910005, 153348277, 154002162, 149846529, 150489601, 150526465,
151370465, 152959157, 153262261, 153916146, 150202881, 152750261,
153004213), modval=cms.untracked.int32(112))
process.TFileService = cms.Service('TFileService', fileName=cms.string(
'RawAnalyzer.root'))
process.MessageLogger.cerr.FwkReport.reportEvery = 2000
process.p = cms.Path(process.analyzer)
<|reserved_special_token_1|>
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register(
'file','',VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'File path for storing output')
options.parseArguments()
file_path = options.file
#print file_path
process = cms.Process("RawAnalyzer")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events
#default is HcalTBSource but you can change to PoolSource if you like
#process.source = cms.Source("HcalTBSource",
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal
# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local
# '/store/group/comm_hcal/LS1/USC_222759.root'
# '/store/group/comm_hcal/LS1/USC_223775.root'
#	 '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, taken 31/7/2014
# '/store/group/comm_hcal/LS1/USC_224625.root'
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'
)
)
process.analyzer = cms.EDAnalyzer('RawAnalyzer',
debugit = cms.untracked.bool(False),
outputFile = cms.untracked.string(file_path),
badevlist = cms.vint32(
153647285, 152905909, 153143477, 153217205, 151718625, 153024693, 150641153, 151460577,
152364043, 152889525, 153151669, 151148928, 153471157, 149944833, 151407329, 152529024,
150403585, 151124352, 152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313, 152910005, 153348277,
154002162, 149846529, 150489601, 150526465, 151370465, 152959157, 153262261, 153916146,
150202881, 152750261, 153004213),
modval = cms.untracked.int32(112)
)
process.TFileService = cms.Service("TFileService",fileName = cms.string("RawAnalyzer.root") )
process.MessageLogger.cerr.FwkReport.reportEvery = 2000    #type out every <n> events
process.p = cms.Path(process.analyzer)
|
flexible
|
{
"blob_id": "6aff61ce5cef537e6b1b19e382d8bf80e3a61693",
"index": 1423,
"step-1": "<mask token>\n",
"step-2": "<mask token>\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\n<mask token>\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n<mask token>\n",
"step-3": "<mask token>\noptions = VarParsing.VarParsing()\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\nprocess = cms.Process('RawAnalyzer')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nprocess.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n 'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n ))\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool\n (False), outputFile=cms.untracked.string(file_path), badevlist=cms.\n vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,\n 150641153, 151460577, 152364043, 152889525, 153151669, 151148928, \n 153471157, 149944833, 151407329, 152529024, 150403585, 151124352, \n 152368139, 152451200, 152950965, 153135285, 154125042, 154268402, \n 152261643, 150718977, 152737973, 153409717, 153800866, 151321313, \n 152910005, 153348277, 154002162, 149846529, 150489601, 150526465, \n 151370465, 152959157, 153262261, 153916146, 150202881, 152750261, \n 153004213), modval=cms.untracked.int32(112))\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string(\n 'RawAnalyzer.root'))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000\nprocess.p = cms.Path(process.analyzer)\n",
"step-4": "import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing()\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\nprocess = cms.Process('RawAnalyzer')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nprocess.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n 'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n ))\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool\n (False), outputFile=cms.untracked.string(file_path), badevlist=cms.\n vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,\n 150641153, 151460577, 152364043, 152889525, 153151669, 151148928, \n 153471157, 149944833, 151407329, 152529024, 150403585, 151124352, \n 152368139, 152451200, 152950965, 153135285, 154125042, 154268402, \n 152261643, 150718977, 152737973, 153409717, 153800866, 151321313, \n 152910005, 153348277, 154002162, 149846529, 150489601, 150526465, \n 151370465, 152959157, 153262261, 153916146, 150202881, 152750261, \n 153004213), modval=cms.untracked.int32(112))\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string(\n 'RawAnalyzer.root'))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000\nprocess.p = cms.Path(process.analyzer)\n",
"step-5": "import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing()\noptions.register(\n\t'file','',VarParsing.VarParsing.multiplicity.singleton,\n\tVarParsing.VarParsing.varType.string,\n\t'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\n#print file_path\n\nprocess = cms.Process(\"RawAnalyzer\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events\n\n#default is HcalTBSource but you can change to PoolSource if you like\n#process.source = cms.Source(\"HcalTBSource\",\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'\n# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'\n# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal\n# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local\n# '/store/group/comm_hcal/LS1/USC_222759.root'\n# '/store/group/comm_hcal/LS1/USC_223775.root'\n#\t '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, takend 31/7/2014\n# '/store/group/comm_hcal/LS1/USC_224625.root'\n'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'\n )\n)\n\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer',\n\tdebugit = cms.untracked.bool(False),\n\toutputFile = cms.untracked.string(file_path),\n\tbadevlist = cms.vint32(\n\t153647285,\t152905909,\t153143477,\t153217205,\t151718625,\t153024693,\t150641153,\t151460577,\n\t152364043,\t152889525,\t153151669,\t151148928,\t153471157,\t149944833,\t151407329,\t152529024,\n\t150403585,\t151124352,\t152368139,\t152451200,\t152950965,\t153135285,\t154125042,\t154268402,\n\t152261643,\t150718977,\t152737973,\t153409717,\t153800866,\t151321313,\t152910005,\t153348277,\n\t154002162,\t149846529,\t150489601,\t150526465,\t151370465,\t152959157,\t153262261,\t153916146,\n\t150202881,\t152750261, 153004213),\n\tmodval = cms.untracked.int32(112)\n)\nprocess.TFileService = cms.Service(\"TFileService\",fileName = cms.string(\"RawAnalyzer.root\") )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000 #type out ever <n> events\nprocess.p = cms.Path(process.analyzer)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Graph:
def __init__(self, num_vertices):
self.adj_list = {}
for i in range(num_vertices):
self.adj_list[i] = []
def add_vertice(self, source):
self.adj_list[source] = []
def add_edge(self, source, dest):
self.adj_list[source].append(dest)
<|reserved_special_token_0|>
def topo_order(self):
def topo_util(source, visited, stack):
visited.add(source)
for neighbour in self.adj_list[source]:
if neighbour not in visited:
topo_util(neighbour, visited, stack)
stack.append(source)
visited, stack = set(), []
for vertex in self.adj_list.keys():
if vertex not in visited:
topo_util(vertex, visited, stack)
stack.reverse()
print(stack)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Graph:
def __init__(self, num_vertices):
self.adj_list = {}
for i in range(num_vertices):
self.adj_list[i] = []
def add_vertice(self, source):
self.adj_list[source] = []
def add_edge(self, source, dest):
self.adj_list[source].append(dest)
def print_graph(self):
print(self.adj_list)
print(self.adj_list.keys())
def topo_order(self):
def topo_util(source, visited, stack):
visited.add(source)
for neighbour in self.adj_list[source]:
if neighbour not in visited:
topo_util(neighbour, visited, stack)
stack.append(source)
visited, stack = set(), []
for vertex in self.adj_list.keys():
if vertex not in visited:
topo_util(vertex, visited, stack)
stack.reverse()
print(stack)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Graph:
def __init__(self, num_vertices):
self.adj_list = {}
for i in range(num_vertices):
self.adj_list[i] = []
def add_vertice(self, source):
self.adj_list[source] = []
def add_edge(self, source, dest):
self.adj_list[source].append(dest)
def print_graph(self):
print(self.adj_list)
print(self.adj_list.keys())
def topo_order(self):
def topo_util(source, visited, stack):
visited.add(source)
for neighbour in self.adj_list[source]:
if neighbour not in visited:
topo_util(neighbour, visited, stack)
stack.append(source)
visited, stack = set(), []
for vertex in self.adj_list.keys():
if vertex not in visited:
topo_util(vertex, visited, stack)
stack.reverse()
print(stack)
<|reserved_special_token_0|>
g.add_edge(5, 0)
g.add_edge(5, 2)
g.add_edge(2, 3)
g.add_edge(3, 1)
g.add_edge(4, 1)
g.add_edge(4, 0)
g.print_graph()
g.topo_order()
<|reserved_special_token_1|>
class Graph:
def __init__(self, num_vertices):
self.adj_list = {}
for i in range(num_vertices):
self.adj_list[i] = []
def add_vertice(self, source):
self.adj_list[source] = []
def add_edge(self, source, dest):
self.adj_list[source].append(dest)
def print_graph(self):
print(self.adj_list)
print(self.adj_list.keys())
def topo_order(self):
def topo_util(source, visited, stack):
visited.add(source)
for neighbour in self.adj_list[source]:
if neighbour not in visited:
topo_util(neighbour, visited, stack)
stack.append(source)
visited, stack = set(), []
for vertex in self.adj_list.keys():
if vertex not in visited:
topo_util(vertex, visited, stack)
stack.reverse()
print(stack)
g = Graph(6)
g.add_edge(5, 0)
g.add_edge(5, 2)
g.add_edge(2, 3)
g.add_edge(3, 1)
g.add_edge(4, 1)
g.add_edge(4, 0)
g.print_graph()
g.topo_order()
|
flexible
|
{
"blob_id": "ae5ec7919b9de4fbf578547c31837add32826f60",
"index": 7448,
"step-1": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n <mask token>\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\n<mask token>\n",
"step-2": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n\n def print_graph(self):\n print(self.adj_list)\n print(self.adj_list.keys())\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\n<mask token>\n",
"step-3": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n\n def print_graph(self):\n print(self.adj_list)\n print(self.adj_list.keys())\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\n<mask token>\ng.add_edge(5, 0)\ng.add_edge(5, 2)\ng.add_edge(2, 3)\ng.add_edge(3, 1)\ng.add_edge(4, 1)\ng.add_edge(4, 0)\ng.print_graph()\ng.topo_order()\n",
"step-4": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n\n def print_graph(self):\n print(self.adj_list)\n print(self.adj_list.keys())\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\ng = Graph(6)\ng.add_edge(5, 0)\ng.add_edge(5, 2)\ng.add_edge(2, 3)\ng.add_edge(3, 1)\ng.add_edge(4, 1)\ng.add_edge(4, 0)\ng.print_graph()\ng.topo_order()\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
SERVICE = MediathekViewService()
SERVICE.init()
SERVICE.run()
SERVICE.exit()
del SERVICE
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from resources.lib.service import MediathekViewService
if __name__ == '__main__':
SERVICE = MediathekViewService()
SERVICE.init()
SERVICE.run()
SERVICE.exit()
del SERVICE
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
"""
The main service module
MIT License
Copyright (c) 2017-2020, Leo Moll
"""
# -- Imports ------------------------------------------------
from resources.lib.service import MediathekViewService
# -- Main Code ----------------------------------------------
if __name__ == '__main__':
SERVICE = MediathekViewService()
SERVICE.init()
SERVICE.run()
SERVICE.exit()
del SERVICE
|
flexible
|
{
"blob_id": "e769e930ab8f0356116679bc38a09b83886eb8f6",
"index": 4003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n SERVICE = MediathekViewService()\n SERVICE.init()\n SERVICE.run()\n SERVICE.exit()\n del SERVICE\n",
"step-3": "<mask token>\nfrom resources.lib.service import MediathekViewService\nif __name__ == '__main__':\n SERVICE = MediathekViewService()\n SERVICE.init()\n SERVICE.run()\n SERVICE.exit()\n del SERVICE\n",
"step-4": "# -*- coding: utf-8 -*-\n# SPDX-License-Identifier: MIT\n\"\"\"\nThe main service module\n\nMIT License\n\nCopyright (c) 2017-2020, Leo Moll\n\"\"\"\n\n\n\n# -- Imports ------------------------------------------------\nfrom resources.lib.service import MediathekViewService\n\n# -- Main Code ----------------------------------------------\nif __name__ == '__main__':\n SERVICE = MediathekViewService()\n SERVICE.init()\n SERVICE.run()\n SERVICE.exit()\n del SERVICE\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding=utf-8
import sys
if len(sys.argv) == 2:
filepath = sys.argv[1]
pRead = open(filepath,'r')#wordlist.txt
pWrite = open("..\\pro\\hmmsdef.mmf",'w')
time = 0
for line in pRead:
if line != '\n':
            line = line[0: len(line) - 1] # strip the trailing \n
if line == "sil ":
line = line[0: len(line) - 1]
print line
everyHmmfilepath = "..\\..\\model\\hmm3\\hmm_" + line + ".hmm"
pHmmRead = open(everyHmmfilepath,'r')
if time == 0:
                pWrite.write(pHmmRead.read()) # read() grabs the rest of the file
pWrite.write("\n")
time = 1
else:
for i in range(3):
pHmmRead.readline()
pWrite.write(pHmmRead.read())
pWrite.write("\n")
else :
print "the agres must be one"
|
normal
|
{
"blob_id": "9bd6da909baeb859153e3833f0f43d8cbcb66200",
"index": 9324,
"step-1": "# coding=utf-8\nimport sys\nif len(sys.argv) == 2:\n filepath = sys.argv[1]\n pRead = open(filepath,'r')#wordlist.txt\n pWrite = open(\"..\\\\pro\\\\hmmsdef.mmf\",'w')\n time = 0\n for line in pRead:\n if line != '\\n':\n line = line[0: len(line) - 1] #去除最后的\\n\n if line == \"sil \":\n line = line[0: len(line) - 1]\n print line\n everyHmmfilepath = \"..\\\\..\\\\model\\\\hmm3\\\\hmm_\" + line + \".hmm\"\n pHmmRead = open(everyHmmfilepath,'r')\n if time == 0:\n pWrite.write(pHmmRead.read()) # read()读剩余全文\n pWrite.write(\"\\n\")\n time = 1\n else:\n for i in range(3):\n pHmmRead.readline()\n pWrite.write(pHmmRead.read())\n pWrite.write(\"\\n\")\nelse :\n print \"the agres must be one\"",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def start(caller):
if not caller:
return
caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}
caller.ndb._menutree.character = {'home_planet': None, 'full_name':
None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,
'current_term': 0, 'species': 'human'}
caller.ndb._menutree.terms = []
for attribute in AttributeEnum:
caller.ndb._menutree.character['stats'][attribute.name] = 20
text = """
Welcome to Singularity's Character Generator!
Have a paragraph about WTF is going on and some info about our game. Also here are some warnings
that you *definitely* shouldn't make multiple characters. And also here's some commands to
help get you more info! TBD!!!
|yPlease do not make multiple characters to game chargen.|n
When you're ready, go ahead and like.. type |ybegin|n to start CharGen.
"""
return text, {'key': 'begin', 'goto': 'node_menu'}
<|reserved_special_token_0|>
def node_basics(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name:
name = 'Not Set'
species = character['species']
origin = character['origin']
if not origin:
origin = 'Not Set'
age = character['age']
text = (
"""
|wFull Name:|n %s
|wAdolescent Age:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Type |yhelp <command>|n to get info on available choices.
"""
% (name, age, species, origin))
options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',
'goto': _node_basics_full_name}, {'key': 'age', 'goto':
_node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {
'key': 'origin', 'goto': _node_basics_origin}
return text, options
def _node_basics_full_name(caller):
def callback(caller, prompt, user_input):
caller.msg("You set your character's full name to: %s." % user_input)
caller.ndb._menutree.character['full_name'] = user_input
get_input(caller, ">> Enter your character's full name.", callback)
def _node_basics_age(caller):
def callback(caller, prompt, user_input):
species = next(s for s in CHARGEN['species'] if s['key'] == caller.
ndb._menutree.character['species'])
if not user_input.is_integer() or int(user_input) < species[
'min_start_age'] or int(user_input) > species['max_start_age']:
caller.msg('Age must be a valid number between %s and %s.' % (
species['min_start_age'], species['max_start_age']))
return
caller.msg("You set your character's age to: %s." % user_input)
caller.ndb._menutree.character['age'] = int(user_input)
get_input(caller, ">> Enter your character's age.", callback)
<|reserved_special_token_0|>
def _is_attributes_done(caller):
if caller.ndb._menutree.points['attributes'] != 0:
return False, 'All attribute points must be allocated.'
return True, ''
def _is_basics_done(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name or len(name) < 3:
return (False,
'Full name must have a value and be longer than 3 characters.')
origin = character['origin']
if not origin:
return False, 'Must select an origin.'
species_stats = next(s for s in CHARGEN['species'] if s['key'] ==
character['species'])
age = character['age']
if age < species_stats['min_start_age']:
return False, 'Age must be equal to or more than %s.' % species_stats[
'min_start_age']
if age > species_stats['max_start_age']:
return False, 'Age must be equal to or less than %s.' % species_stats[
'max_start_age']
return True, ''
def _is_skills_done(caller):
return False, ''
def _is_life_done(caller):
return False, ''
<|reserved_special_token_0|>
def node_attributes(caller):
text = ''
for attribute in AttributeEnum:
if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.
character['is_psionic']):
continue
text += '%s: ' % attribute.name
text += '%s\r\n' % caller.ndb._menutree.character['stats'][attribute
.name]
text += '\r\n%s points remaining.\r\n' % caller.ndb._menutree.points[
'attributes']
text += (
'\r\nType "|yadd <number> to <attribute>|n" to adjust an attribute positively.'
)
text += (
'\r\nType "|ysub <number> from <attribute>|n" to adjust an attribute negatively.'
)
options = {'key': '_default', 'goto': _node_attributes}, {'key':
'return', 'goto': 'node_menu'}
return text, options
<|reserved_special_token_0|>
def node_terms(caller):
text = ''
term_count = 1
for term in caller.ndb._menutree.terms:
text += '\r\n* Term %s:' % term_count + ' %s' % term.title
term_count += 1
age = caller.ndb._menutree.character['age'
] + 4 * caller.ndb._menutree.character['current_term']
text += '\r\nCurrent Character Age: %s' % age
text += '\r\n\r\nType "|ychoose <term>|n" to begin a term.'
options = {'key': '_default', 'goto': _node_terms}, {'key':
'list choices', 'goto': _list_term_choices}, {'key': 'finish',
'goto': 'node_finish'}
return text, options
def _node_terms(caller, raw_string):
match = re.match('choose (\\w+)', raw_string)
if not match:
error(caller, "I didn't understand that.")
return 'node_terms'
term_token = match.group(1).lower()
term = next((x for x in TERMS if x['title'].lower().startswith(
term_token)), None)
if not term:
error(caller,
'%s is not a valid term. Type "|ylist choices|n" to get a list of all available careers.'
)
return 'node_terms'
caller.ndb._menutree.terms.append({'term': term['title']})
return 'node_term'
<|reserved_special_token_0|>
def node_term(caller):
term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1
]['term']
text = 'Career: %s' % term_title
text += '\r\nAssignment: Not Set'
text += '\r\nPersonal Advancement: Not Set'
text += '\r\nYears: %s' % caller.ndb._menutree.character['age']
text += '-%s' % (caller.ndb._menutree.character['age'] + 4)
text += '\r\n\r\nLife Event: |y1 Available|n'
text += (
'\r\n\r\nType "|yset Assignment to <assignment>|n" to choose an assignment.'
)
text += (
'\r\nType "|yset Advancement to <option>|n" to choose a personal advancement.'
)
text += (
'\r\n\r\nRolling for a life event is optional and may yield positive or negative results. '
)
text += (
"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan."
)
options = {'key': 'show assignments', 'goto': _list_term_assignments}, {
'key': 'show advancements', 'goto': _list_term_advancements}, {'key':
'roll life event', 'goto': _do_life_event}
return text, options
<|reserved_special_token_0|>
def adjust_attribute(caller, match, is_add):
attribute_token = match.group(2).lower()
attribute = next((x for x in AttributeEnum if x.name.lower().startswith
(attribute_token)), None)
if not attribute:
error(caller, '%s is not a valid attribute.' % match.group(2))
return 'node_attributes'
value = int(match.group(1))
if not value or value < 0:
error(caller, 'Value to adjust must be a positive number.')
return 'node_attributes'
attribute_value = caller.ndb._menutree.character['stats'][attribute.name]
if not is_add and attribute_value - value < 10:
error(caller, attribute.name + ' cannot be reduced below 10.')
return 'node_attributes'
i_value = value
cost = 0
while i_value > 0:
if is_add:
new_value = i_value + attribute_value
else:
new_value = attribute_value - i_value
if new_value <= 12:
cost += 4
elif new_value <= 16:
cost += 2
elif new_value <= 23:
cost += 1
elif new_value <= 26:
cost += 2
elif new_value <= 30:
cost += 4
i_value -= 1
if not is_add:
cost *= -1
if cost > caller.ndb._menutree.points['attributes']:
deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1
error(caller, 'Raising %s' % attribute.name +
' costs %s total points,' % cost +
' %s more points than you have available.' % deficit)
return 'node_attributes'
if is_add:
caller.ndb._menutree.character['stats'][attribute.name] += value
else:
caller.ndb._menutree.character['stats'][attribute.name] -= value
caller.ndb._menutree.points['attributes'] -= cost
msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.
_menutree.character['stats'][attribute.name])
msg += ' for %s points.' % cost
success(caller, msg)
return 'node_attributes'
def node_finish(caller):
text = ''
options = ()
return text, options
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def start(caller):
if not caller:
return
caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}
caller.ndb._menutree.character = {'home_planet': None, 'full_name':
None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,
'current_term': 0, 'species': 'human'}
caller.ndb._menutree.terms = []
for attribute in AttributeEnum:
caller.ndb._menutree.character['stats'][attribute.name] = 20
text = """
Welcome to Singularity's Character Generator!
Have a paragraph about WTF is going on and some info about our game. Also here are some warnings
that you *definitely* shouldn't make multiple characters. And also here's some commands to
help get you more info! TBD!!!
|yPlease do not make multiple characters to game chargen.|n
When you're ready, go ahead and like.. type |ybegin|n to start CharGen.
"""
return text, {'key': 'begin', 'goto': 'node_menu'}
<|reserved_special_token_0|>
def node_basics(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name:
name = 'Not Set'
species = character['species']
origin = character['origin']
if not origin:
origin = 'Not Set'
age = character['age']
text = (
"""
|wFull Name:|n %s
|wAdolescent Age:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Type |yhelp <command>|n to get info on available choices.
"""
% (name, age, species, origin))
options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',
'goto': _node_basics_full_name}, {'key': 'age', 'goto':
_node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {
'key': 'origin', 'goto': _node_basics_origin}
return text, options
def _node_basics_full_name(caller):
def callback(caller, prompt, user_input):
caller.msg("You set your character's full name to: %s." % user_input)
caller.ndb._menutree.character['full_name'] = user_input
get_input(caller, ">> Enter your character's full name.", callback)
def _node_basics_age(caller):
def callback(caller, prompt, user_input):
species = next(s for s in CHARGEN['species'] if s['key'] == caller.
ndb._menutree.character['species'])
if not user_input.is_integer() or int(user_input) < species[
'min_start_age'] or int(user_input) > species['max_start_age']:
caller.msg('Age must be a valid number between %s and %s.' % (
species['min_start_age'], species['max_start_age']))
return
caller.msg("You set your character's age to: %s." % user_input)
caller.ndb._menutree.character['age'] = int(user_input)
get_input(caller, ">> Enter your character's age.", callback)
<|reserved_special_token_0|>
def _is_attributes_done(caller):
if caller.ndb._menutree.points['attributes'] != 0:
return False, 'All attribute points must be allocated.'
return True, ''
def _is_basics_done(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name or len(name) < 3:
return (False,
'Full name must have a value and be longer than 3 characters.')
origin = character['origin']
if not origin:
return False, 'Must select an origin.'
species_stats = next(s for s in CHARGEN['species'] if s['key'] ==
character['species'])
age = character['age']
if age < species_stats['min_start_age']:
return False, 'Age must be equal to or more than %s.' % species_stats[
'min_start_age']
if age > species_stats['max_start_age']:
return False, 'Age must be equal to or less than %s.' % species_stats[
'max_start_age']
return True, ''
def _is_skills_done(caller):
return False, ''
def _is_life_done(caller):
return False, ''
<|reserved_special_token_0|>
def node_attributes(caller):
text = ''
for attribute in AttributeEnum:
if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.
character['is_psionic']):
continue
text += '%s: ' % attribute.name
text += '%s\r\n' % caller.ndb._menutree.character['stats'][attribute
.name]
text += '\r\n%s points remaining.\r\n' % caller.ndb._menutree.points[
'attributes']
text += (
'\r\nType "|yadd <number> to <attribute>|n" to adjust an attribute positively.'
)
text += (
'\r\nType "|ysub <number> from <attribute>|n" to adjust an attribute negatively.'
)
options = {'key': '_default', 'goto': _node_attributes}, {'key':
'return', 'goto': 'node_menu'}
return text, options
<|reserved_special_token_0|>
def node_terms(caller):
text = ''
term_count = 1
for term in caller.ndb._menutree.terms:
text += '\r\n* Term %s:' % term_count + ' %s' % term.title
term_count += 1
age = caller.ndb._menutree.character['age'
] + 4 * caller.ndb._menutree.character['current_term']
text += '\r\nCurrent Character Age: %s' % age
text += '\r\n\r\nType "|ychoose <term>|n" to begin a term.'
options = {'key': '_default', 'goto': _node_terms}, {'key':
'list choices', 'goto': _list_term_choices}, {'key': 'finish',
'goto': 'node_finish'}
return text, options
def _node_terms(caller, raw_string):
match = re.match('choose (\\w+)', raw_string)
if not match:
error(caller, "I didn't understand that.")
return 'node_terms'
term_token = match.group(1).lower()
term = next((x for x in TERMS if x['title'].lower().startswith(
term_token)), None)
if not term:
error(caller,
'%s is not a valid term. Type "|ylist choices|n" to get a list of all available careers.'
)
return 'node_terms'
caller.ndb._menutree.terms.append({'term': term['title']})
return 'node_term'
<|reserved_special_token_0|>
def node_term(caller):
term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1
]['term']
text = 'Career: %s' % term_title
text += '\r\nAssignment: Not Set'
text += '\r\nPersonal Advancement: Not Set'
text += '\r\nYears: %s' % caller.ndb._menutree.character['age']
text += '-%s' % (caller.ndb._menutree.character['age'] + 4)
text += '\r\n\r\nLife Event: |y1 Available|n'
text += (
'\r\n\r\nType "|yset Assignment to <assignment>|n" to choose an assignment.'
)
text += (
'\r\nType "|yset Advancement to <option>|n" to choose a personal advancement.'
)
text += (
'\r\n\r\nRolling for a life event is optional and may yield positive or negative results. '
)
text += (
"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan."
)
options = {'key': 'show assignments', 'goto': _list_term_assignments}, {
'key': 'show advancements', 'goto': _list_term_advancements}, {'key':
'roll life event', 'goto': _do_life_event}
return text, options
def _list_term_advancements(caller):
return 'node_term'
<|reserved_special_token_0|>
def adjust_attribute(caller, match, is_add):
attribute_token = match.group(2).lower()
attribute = next((x for x in AttributeEnum if x.name.lower().startswith
(attribute_token)), None)
if not attribute:
error(caller, '%s is not a valid attribute.' % match.group(2))
return 'node_attributes'
value = int(match.group(1))
if not value or value < 0:
error(caller, 'Value to adjust must be a positive number.')
return 'node_attributes'
attribute_value = caller.ndb._menutree.character['stats'][attribute.name]
if not is_add and attribute_value - value < 10:
error(caller, attribute.name + ' cannot be reduced below 10.')
return 'node_attributes'
i_value = value
cost = 0
while i_value > 0:
if is_add:
new_value = i_value + attribute_value
else:
new_value = attribute_value - i_value
if new_value <= 12:
cost += 4
elif new_value <= 16:
cost += 2
elif new_value <= 23:
cost += 1
elif new_value <= 26:
cost += 2
elif new_value <= 30:
cost += 4
i_value -= 1
if not is_add:
cost *= -1
if cost > caller.ndb._menutree.points['attributes']:
deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1
error(caller, 'Raising %s' % attribute.name +
' costs %s total points,' % cost +
' %s more points than you have available.' % deficit)
return 'node_attributes'
if is_add:
caller.ndb._menutree.character['stats'][attribute.name] += value
else:
caller.ndb._menutree.character['stats'][attribute.name] -= value
caller.ndb._menutree.points['attributes'] -= cost
msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.
_menutree.character['stats'][attribute.name])
msg += ' for %s points.' % cost
success(caller, msg)
return 'node_attributes'
def node_finish(caller):
text = ''
options = ()
return text, options
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def start(caller):
if not caller:
return
caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}
caller.ndb._menutree.character = {'home_planet': None, 'full_name':
None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,
'current_term': 0, 'species': 'human'}
caller.ndb._menutree.terms = []
for attribute in AttributeEnum:
caller.ndb._menutree.character['stats'][attribute.name] = 20
text = """
Welcome to Singularity's Character Generator!
Have a paragraph about WTF is going on and some info about our game. Also here are some warnings
that you *definitely* shouldn't make multiple characters. And also here's some commands to
help get you more info! TBD!!!
|yPlease do not make multiple characters to game chargen.|n
When you're ready, go ahead and like.. type |ybegin|n to start CharGen.
"""
return text, {'key': 'begin', 'goto': 'node_menu'}
def node_menu(caller):
name = caller.ndb._menutree.character['full_name']
if not name:
name = 'Not Set'
species = caller.ndb._menutree.character['species']
origin = caller.ndb._menutree.character['origin']
if not origin:
origin = 'Not Set'
d_b = '|gOk|n' if _is_basics_done(caller)[0] else '|rNo|n'
d_a = '|gOk|n' if _is_attributes_done(caller)[0] else '|rNo|n'
d_s = '|gOk|n' if _is_skills_done(caller)[0] else '|rNo|n'
d_l = '|gOk|n' if _is_life_done(caller)[0] else '|rNo|n'
text = (
"""
Below are the general details of your character. Use the below commands
to navigate through chargen steps. Some steps may appear after others are completed.
|wFull Name:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Completed:
|wBasics:|n %s
|wAttributes:|n %s
|wStarting Skills:|n %s
|wLife path:|n %s
"""
% (name, species, origin, d_b, d_a, d_s, d_l))
options = {'key': 'basics', 'goto': 'node_basics'}, {'key':
'attributes', 'goto': 'node_attributes'}, {'key': 'skills', 'goto':
'node_skills'}
if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0
] and _is_skills_done(caller)[0]:
options.append({'key': 'life path', 'goto': 'node_terms'})
if _is_life_done(caller)[0]:
options.append({'key': 'finish', 'goto': 'node_finish'})
return text, options
def node_basics(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name:
name = 'Not Set'
species = character['species']
origin = character['origin']
if not origin:
origin = 'Not Set'
age = character['age']
text = (
"""
|wFull Name:|n %s
|wAdolescent Age:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Type |yhelp <command>|n to get info on available choices.
"""
% (name, age, species, origin))
options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',
'goto': _node_basics_full_name}, {'key': 'age', 'goto':
_node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {
'key': 'origin', 'goto': _node_basics_origin}
return text, options
def _node_basics_full_name(caller):
def callback(caller, prompt, user_input):
caller.msg("You set your character's full name to: %s." % user_input)
caller.ndb._menutree.character['full_name'] = user_input
get_input(caller, ">> Enter your character's full name.", callback)
def _node_basics_age(caller):
def callback(caller, prompt, user_input):
species = next(s for s in CHARGEN['species'] if s['key'] == caller.
ndb._menutree.character['species'])
if not user_input.is_integer() or int(user_input) < species[
'min_start_age'] or int(user_input) > species['max_start_age']:
caller.msg('Age must be a valid number between %s and %s.' % (
species['min_start_age'], species['max_start_age']))
return
caller.msg("You set your character's age to: %s." % user_input)
caller.ndb._menutree.character['age'] = int(user_input)
get_input(caller, ">> Enter your character's age.", callback)
def _node_basics_species(caller):
def callback(caller, prompt, user_input):
character = caller.ndb._menutree.character
species = next((s for s in SPECIES if s['title'].lower().startswith
(user_input.lower())), None)
if not species:
caller.msg(
"'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n"
% user_input)
return
species_chargen = next(s for s in CHARGEN['species'] if s['key'] ==
species['key'])
caller.msg("You set your character's species to: %s." % species[
'title'])
character['age'] = species_chargen['min_age']
character['origin'] = None
character['species'] = species['key']
get_input(caller, ">> Enter your character's species.", callback)
def _node_basics_origin(caller):
def callback(caller, prompt, user_input):
character = caller.ndb._menutree.character
origins = filter(lambda o: character['species'] in o[
'species_restrictions'], CHARGEN['origins'])
origin = next((o for o in origins if o['title'].lower().startswith(
user_input.lower())), None)
if not origin:
caller.msg(
"'%s' is not a valid origin choice. Valid choices: %s" % (
user_input, list_to_string(map(lambda o: o['title'], origins)))
)
return
caller.msg("You set your character's origin to: %s." % user_input)
character['origin'] = origin['key']
get_input(caller, ">> Enter your character's origin.", callback)
def _is_attributes_done(caller):
if caller.ndb._menutree.points['attributes'] != 0:
return False, 'All attribute points must be allocated.'
return True, ''
def _is_basics_done(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name or len(name) < 3:
return (False,
'Full name must have a value and be longer than 3 characters.')
origin = character['origin']
if not origin:
return False, 'Must select an origin.'
species_stats = next(s for s in CHARGEN['species'] if s['key'] ==
character['species'])
age = character['age']
if age < species_stats['min_start_age']:
return False, 'Age must be equal to or more than %s.' % species_stats[
'min_start_age']
if age > species_stats['max_start_age']:
return False, 'Age must be equal to or less than %s.' % species_stats[
'max_start_age']
return True, ''
def _is_skills_done(caller):
return False, ''
def _is_life_done(caller):
return False, ''
<|reserved_special_token_0|>
def node_attributes(caller):
text = ''
for attribute in AttributeEnum:
if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.
character['is_psionic']):
continue
text += '%s: ' % attribute.name
text += '%s\r\n' % caller.ndb._menutree.character['stats'][attribute
.name]
text += '\r\n%s points remaining.\r\n' % caller.ndb._menutree.points[
'attributes']
text += (
'\r\nType "|yadd <number> to <attribute>|n" to adjust an attribute positively.'
)
text += (
'\r\nType "|ysub <number> from <attribute>|n" to adjust an attribute negatively.'
)
options = {'key': '_default', 'goto': _node_attributes}, {'key':
'return', 'goto': 'node_menu'}
return text, options
<|reserved_special_token_0|>
def node_terms(caller):
text = ''
term_count = 1
for term in caller.ndb._menutree.terms:
text += '\r\n* Term %s:' % term_count + ' %s' % term.title
term_count += 1
age = caller.ndb._menutree.character['age'
] + 4 * caller.ndb._menutree.character['current_term']
text += '\r\nCurrent Character Age: %s' % age
text += '\r\n\r\nType "|ychoose <term>|n" to begin a term.'
options = {'key': '_default', 'goto': _node_terms}, {'key':
'list choices', 'goto': _list_term_choices}, {'key': 'finish',
'goto': 'node_finish'}
return text, options
def _node_terms(caller, raw_string):
match = re.match('choose (\\w+)', raw_string)
if not match:
error(caller, "I didn't understand that.")
return 'node_terms'
term_token = match.group(1).lower()
term = next((x for x in TERMS if x['title'].lower().startswith(
term_token)), None)
if not term:
error(caller,
'%s is not a valid term. Type "|ylist choices|n" to get a list of all available careers.'
)
return 'node_terms'
caller.ndb._menutree.terms.append({'term': term['title']})
return 'node_term'
<|reserved_special_token_0|>
def node_term(caller):
term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1
]['term']
text = 'Career: %s' % term_title
text += '\r\nAssignment: Not Set'
text += '\r\nPersonal Advancement: Not Set'
text += '\r\nYears: %s' % caller.ndb._menutree.character['age']
text += '-%s' % (caller.ndb._menutree.character['age'] + 4)
text += '\r\n\r\nLife Event: |y1 Available|n'
text += (
'\r\n\r\nType "|yset Assignment to <assignment>|n" to choose an assignment.'
)
text += (
'\r\nType "|yset Advancement to <option>|n" to choose a personal advancement.'
)
text += (
'\r\n\r\nRolling for a life event is optional and may yield positive or negative results. '
)
text += (
"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan."
)
options = {'key': 'show assignments', 'goto': _list_term_assignments}, {
'key': 'show advancements', 'goto': _list_term_advancements}, {'key':
'roll life event', 'goto': _do_life_event}
return text, options
def _list_term_advancements(caller):
return 'node_term'
<|reserved_special_token_0|>
def adjust_attribute(caller, match, is_add):
attribute_token = match.group(2).lower()
attribute = next((x for x in AttributeEnum if x.name.lower().startswith
(attribute_token)), None)
if not attribute:
error(caller, '%s is not a valid attribute.' % match.group(2))
return 'node_attributes'
value = int(match.group(1))
if not value or value < 0:
error(caller, 'Value to adjust must be a positive number.')
return 'node_attributes'
attribute_value = caller.ndb._menutree.character['stats'][attribute.name]
if not is_add and attribute_value - value < 10:
error(caller, attribute.name + ' cannot be reduced below 10.')
return 'node_attributes'
i_value = value
cost = 0
while i_value > 0:
if is_add:
new_value = i_value + attribute_value
else:
new_value = attribute_value - i_value
if new_value <= 12:
cost += 4
elif new_value <= 16:
cost += 2
elif new_value <= 23:
cost += 1
elif new_value <= 26:
cost += 2
elif new_value <= 30:
cost += 4
i_value -= 1
if not is_add:
cost *= -1
if cost > caller.ndb._menutree.points['attributes']:
deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1
error(caller, 'Raising %s' % attribute.name +
' costs %s total points,' % cost +
' %s more points than you have available.' % deficit)
return 'node_attributes'
if is_add:
caller.ndb._menutree.character['stats'][attribute.name] += value
else:
caller.ndb._menutree.character['stats'][attribute.name] -= value
caller.ndb._menutree.points['attributes'] -= cost
msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.
_menutree.character['stats'][attribute.name])
msg += ' for %s points.' % cost
success(caller, msg)
return 'node_attributes'
def node_finish(caller):
text = ''
options = ()
return text, options
<|reserved_special_token_0|>
def error(caller, msg):
caller.msg('|y<|rError|y>|n %s' % msg)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def start(caller):
if not caller:
return
caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}
caller.ndb._menutree.character = {'home_planet': None, 'full_name':
None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,
'current_term': 0, 'species': 'human'}
caller.ndb._menutree.terms = []
for attribute in AttributeEnum:
caller.ndb._menutree.character['stats'][attribute.name] = 20
text = """
Welcome to Singularity's Character Generator!
Have a paragraph about WTF is going on and some info about our game. Also here are some warnings
that you *definitely* shouldn't make multiple characters. And also here's some commands to
help get you more info! TBD!!!
|yPlease do not make multiple characters to game chargen.|n
When you're ready, go ahead and like.. type |ybegin|n to start CharGen.
"""
return text, {'key': 'begin', 'goto': 'node_menu'}
def node_menu(caller):
name = caller.ndb._menutree.character['full_name']
if not name:
name = 'Not Set'
species = caller.ndb._menutree.character['species']
origin = caller.ndb._menutree.character['origin']
if not origin:
origin = 'Not Set'
d_b = '|gOk|n' if _is_basics_done(caller)[0] else '|rNo|n'
d_a = '|gOk|n' if _is_attributes_done(caller)[0] else '|rNo|n'
d_s = '|gOk|n' if _is_skills_done(caller)[0] else '|rNo|n'
d_l = '|gOk|n' if _is_life_done(caller)[0] else '|rNo|n'
text = (
"""
Below are the general details of your character. Use the below commands
to navigate through chargen steps. Some steps may appear after others are completed.
|wFull Name:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Completed:
|wBasics:|n %s
|wAttributes:|n %s
|wStarting Skills:|n %s
|wLife path:|n %s
"""
% (name, species, origin, d_b, d_a, d_s, d_l))
    options = [{'key': 'basics', 'goto': 'node_basics'}, {'key':
        'attributes', 'goto': 'node_attributes'}, {'key': 'skills', 'goto':
        'node_skills'}]
if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0
] and _is_skills_done(caller)[0]:
options.append({'key': 'life path', 'goto': 'node_terms'})
if _is_life_done(caller)[0]:
options.append({'key': 'finish', 'goto': 'node_finish'})
return text, options
def node_basics(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name:
name = 'Not Set'
species = character['species']
origin = character['origin']
if not origin:
origin = 'Not Set'
age = character['age']
text = (
"""
|wFull Name:|n %s
|wAdolescent Age:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Type |yhelp <command>|n to get info on available choices.
"""
% (name, age, species, origin))
options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',
'goto': _node_basics_full_name}, {'key': 'age', 'goto':
_node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {
'key': 'origin', 'goto': _node_basics_origin}
return text, options
def _node_basics_full_name(caller):
def callback(caller, prompt, user_input):
caller.msg("You set your character's full name to: %s." % user_input)
caller.ndb._menutree.character['full_name'] = user_input
get_input(caller, ">> Enter your character's full name.", callback)
def _node_basics_age(caller):
def callback(caller, prompt, user_input):
species = next(s for s in CHARGEN['species'] if s['key'] == caller.
ndb._menutree.character['species'])
        if not user_input.isdigit() or int(user_input) < species[
'min_start_age'] or int(user_input) > species['max_start_age']:
caller.msg('Age must be a valid number between %s and %s.' % (
species['min_start_age'], species['max_start_age']))
return
caller.msg("You set your character's age to: %s." % user_input)
caller.ndb._menutree.character['age'] = int(user_input)
get_input(caller, ">> Enter your character's age.", callback)
def _node_basics_species(caller):
def callback(caller, prompt, user_input):
character = caller.ndb._menutree.character
species = next((s for s in SPECIES if s['title'].lower().startswith
(user_input.lower())), None)
if not species:
caller.msg(
"'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n"
% user_input)
return
species_chargen = next(s for s in CHARGEN['species'] if s['key'] ==
species['key'])
caller.msg("You set your character's species to: %s." % species[
'title'])
character['age'] = species_chargen['min_age']
character['origin'] = None
character['species'] = species['key']
get_input(caller, ">> Enter your character's species.", callback)
def _node_basics_origin(caller):
def callback(caller, prompt, user_input):
character = caller.ndb._menutree.character
        origins = list(filter(lambda o: character['species'] in o[
            'species_restrictions'], CHARGEN['origins']))
origin = next((o for o in origins if o['title'].lower().startswith(
user_input.lower())), None)
if not origin:
caller.msg(
"'%s' is not a valid origin choice. Valid choices: %s" % (
user_input, list_to_string(map(lambda o: o['title'], origins)))
)
return
caller.msg("You set your character's origin to: %s." % user_input)
character['origin'] = origin['key']
get_input(caller, ">> Enter your character's origin.", callback)
def _is_attributes_done(caller):
if caller.ndb._menutree.points['attributes'] != 0:
return False, 'All attribute points must be allocated.'
return True, ''
def _is_basics_done(caller):
character = caller.ndb._menutree.character
name = character['full_name']
if not name or len(name) < 3:
return (False,
'Full name must have a value and be longer than 3 characters.')
origin = character['origin']
if not origin:
return False, 'Must select an origin.'
species_stats = next(s for s in CHARGEN['species'] if s['key'] ==
character['species'])
age = character['age']
if age < species_stats['min_start_age']:
return False, 'Age must be equal to or more than %s.' % species_stats[
'min_start_age']
if age > species_stats['max_start_age']:
return False, 'Age must be equal to or less than %s.' % species_stats[
'max_start_age']
return True, ''
def _is_skills_done(caller):
return False, ''
def _is_life_done(caller):
return False, ''
<|reserved_special_token_0|>
def node_attributes(caller):
text = ''
for attribute in AttributeEnum:
if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.
character['is_psionic']):
continue
text += '%s: ' % attribute.name
text += '%s\r\n' % caller.ndb._menutree.character['stats'][attribute
.name]
text += '\r\n%s points remaining.\r\n' % caller.ndb._menutree.points[
'attributes']
text += (
'\r\nType "|yadd <number> to <attribute>|n" to adjust an attribute positively.'
)
text += (
'\r\nType "|ysub <number> from <attribute>|n" to adjust an attribute negatively.'
)
options = {'key': '_default', 'goto': _node_attributes}, {'key':
'return', 'goto': 'node_menu'}
return text, options
def _node_attributes(caller, raw_string):
match = re.match('add (\\d+) to (\\w+)', raw_string)
if match:
return adjust_attribute(caller, match, True)
match = re.match('sub (\\d+) from (\\w+)', raw_string)
if match:
return adjust_attribute(caller, match, False)
if not match:
return 'node_attributes'
def node_terms(caller):
text = ''
term_count = 1
for term in caller.ndb._menutree.terms:
        text += '\r\n* Term %s:' % term_count + ' %s' % term['term']
term_count += 1
age = caller.ndb._menutree.character['age'
] + 4 * caller.ndb._menutree.character['current_term']
text += '\r\nCurrent Character Age: %s' % age
text += '\r\n\r\nType "|ychoose <term>|n" to begin a term.'
options = {'key': '_default', 'goto': _node_terms}, {'key':
'list choices', 'goto': _list_term_choices}, {'key': 'finish',
'goto': 'node_finish'}
return text, options
def _node_terms(caller, raw_string):
match = re.match('choose (\\w+)', raw_string)
if not match:
error(caller, "I didn't understand that.")
return 'node_terms'
term_token = match.group(1).lower()
term = next((x for x in TERMS if x['title'].lower().startswith(
term_token)), None)
if not term:
error(caller,
'%s is not a valid term. Type "|ylist choices|n" to get a list of all available careers.'
            % match.group(1))
return 'node_terms'
caller.ndb._menutree.terms.append({'term': term['title']})
return 'node_term'
<|reserved_special_token_0|>
def node_term(caller):
    term_title = caller.ndb._menutree.terms[-1]['term']
text = 'Career: %s' % term_title
text += '\r\nAssignment: Not Set'
text += '\r\nPersonal Advancement: Not Set'
text += '\r\nYears: %s' % caller.ndb._menutree.character['age']
text += '-%s' % (caller.ndb._menutree.character['age'] + 4)
text += '\r\n\r\nLife Event: |y1 Available|n'
text += (
'\r\n\r\nType "|yset Assignment to <assignment>|n" to choose an assignment.'
)
text += (
'\r\nType "|yset Advancement to <option>|n" to choose a personal advancement.'
)
text += (
'\r\n\r\nRolling for a life event is optional and may yield positive or negative results. '
)
text += (
"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan."
)
options = {'key': 'show assignments', 'goto': _list_term_assignments}, {
'key': 'show advancements', 'goto': _list_term_advancements}, {'key':
'roll life event', 'goto': _do_life_event}
return text, options
def _list_term_advancements(caller):
return 'node_term'
<|reserved_special_token_0|>
def _do_life_event(caller):
return 'node_term'
def adjust_attribute(caller, match, is_add):
attribute_token = match.group(2).lower()
attribute = next((x for x in AttributeEnum if x.name.lower().startswith
(attribute_token)), None)
if not attribute:
error(caller, '%s is not a valid attribute.' % match.group(2))
return 'node_attributes'
value = int(match.group(1))
if not value or value < 0:
error(caller, 'Value to adjust must be a positive number.')
return 'node_attributes'
attribute_value = caller.ndb._menutree.character['stats'][attribute.name]
if not is_add and attribute_value - value < 10:
error(caller, attribute.name + ' cannot be reduced below 10.')
return 'node_attributes'
i_value = value
cost = 0
while i_value > 0:
if is_add:
new_value = i_value + attribute_value
else:
new_value = attribute_value - i_value
if new_value <= 12:
cost += 4
elif new_value <= 16:
cost += 2
elif new_value <= 23:
cost += 1
elif new_value <= 26:
cost += 2
elif new_value <= 30:
cost += 4
i_value -= 1
if not is_add:
cost *= -1
if cost > caller.ndb._menutree.points['attributes']:
deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1
error(caller, 'Raising %s' % attribute.name +
' costs %s total points,' % cost +
' %s more points than you have available.' % deficit)
return 'node_attributes'
if is_add:
caller.ndb._menutree.character['stats'][attribute.name] += value
else:
caller.ndb._menutree.character['stats'][attribute.name] -= value
caller.ndb._menutree.points['attributes'] -= cost
msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.
_menutree.character['stats'][attribute.name])
msg += ' for %s points.' % cost
success(caller, msg)
return 'node_attributes'
def node_finish(caller):
text = ''
options = ()
return text, options
def success(caller, msg):
caller.msg('|b<|cSystem|b>|n %s' % msg)
def error(caller, msg):
caller.msg('|y<|rError|y>|n %s' % msg)
<|reserved_special_token_1|>
from world.enums import *
from world.content.species import SPECIES
from world.content.chargen import *
from evennia.utils.evmenu import get_input
from evennia.utils.utils import list_to_string
import re
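# Character generation menu for Singularity, driven by Evennia's EvMenu.
# Menu state lives on caller.ndb._menutree: "points" tracks unspent attribute and
# skill points, "character" holds the in-progress sheet, and "terms" collects the
# career terms chosen during the life-path step.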
def start(caller):
if not caller:
return
caller.ndb._menutree.points = {
"attributes": 20,
"skills": 20
}
caller.ndb._menutree.character = {
"home_planet": None,
"full_name": None,
"origin": None,
"stats": {},
"age": 16,
"is_psionic": False,
"current_term": 0,
"species": "human"
}
caller.ndb._menutree.terms = []
for attribute in AttributeEnum:
caller.ndb._menutree.character["stats"][attribute.name] = 20
text = """
Welcome to Singularity's Character Generator!
Have a paragraph about WTF is going on and some info about our game. Also here are some warnings
that you *definitely* shouldn't make multiple characters. And also here's some commands to
help get you more info! TBD!!!
|yPlease do not make multiple characters to game chargen.|n
When you're ready, go ahead and like.. type |ybegin|n to start CharGen.
"""
return text, ({"key": "begin", "goto": "node_menu"})
def node_menu(caller):
name = caller.ndb._menutree.character["full_name"]
if not name:
name = "Not Set"
species = caller.ndb._menutree.character["species"]
origin = caller.ndb._menutree.character["origin"]
if not origin:
origin = "Not Set"
d_b = "|gOk|n" if _is_basics_done(caller)[0] else "|rNo|n"
d_a = "|gOk|n" if _is_attributes_done(caller)[0] else "|rNo|n"
d_s = "|gOk|n" if _is_skills_done(caller)[0] else "|rNo|n"
d_l = "|gOk|n" if _is_life_done(caller)[0] else "|rNo|n"
text = """
Below are the general details of your character. Use the below commands
to navigate through chargen steps. Some steps may appear after others are completed.
|wFull Name:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Completed:
|wBasics:|n %s
|wAttributes:|n %s
|wStarting Skills:|n %s
|wLife path:|n %s
""" % (name, species, origin, d_b, d_a, d_s, d_l)
    # Build options as a list so the conditional entries below can be appended
    # (a tuple has no append()).
    options = [
        {"key": "basics", "goto": "node_basics"},
        {"key": "attributes", "goto": "node_attributes"},
        {"key": "skills", "goto": "node_skills"}
    ]
if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0] and _is_skills_done(caller)[0]:
options.append({"key": "life path", "goto": "node_terms"})
if _is_life_done(caller)[0]:
options.append({"key": "finish", "goto": "node_finish"})
return text, options
def node_basics(caller):
character = caller.ndb._menutree.character
name = character["full_name"]
if not name:
name = "Not Set"
species = character["species"]
origin = character["origin"]
if not origin:
origin = "Not Set"
age = character["age"]
text = """
|wFull Name:|n %s
|wAdolescent Age:|n %s
|wSpecies:|n %s
|wOrigin:|n %s
Type |yhelp <command>|n to get info on available choices.
""" % (name, age, species, origin)
options = (
{"key": "return", "goto": "node_menu"},
{"key": "full_name", "goto": _node_basics_full_name},
{"key": "age", "goto": _node_basics_age},
{"key": "species", "goto": _node_basics_species},
{"key": "origin", "goto": _node_basics_origin}
)
return text, options
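# The basics sub-options below use Evennia's get_input prompt/callback pattern:
# each helper prompts the player, and the callback validates and stores the answer.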
def _node_basics_full_name(caller):
def callback(caller, prompt, user_input):
caller.msg("You set your character's full name to: %s." % user_input)
caller.ndb._menutree.character["full_name"] = user_input
get_input(caller, ">> Enter your character's full name.", callback)
def _node_basics_age(caller):
def callback(caller, prompt, user_input):
species = next(s for s in CHARGEN["species"] if s["key"] == caller.ndb._menutree.character["species"])
        # user_input arrives as a string, so check for digits before converting.
        if not user_input.isdigit() \
                or int(user_input) < species["min_start_age"] \
                or int(user_input) > species["max_start_age"]:
caller.msg("Age must be a valid number between %s and %s."
% (species["min_start_age"], species["max_start_age"]))
return
caller.msg("You set your character's age to: %s." % user_input)
caller.ndb._menutree.character["age"] = int(user_input)
get_input(caller, ">> Enter your character's age.", callback)
def _node_basics_species(caller):
def callback(caller, prompt, user_input):
character = caller.ndb._menutree.character
species = next((s for s in SPECIES if s["title"].lower().startswith(user_input.lower())), None)
if not species:
caller.msg("'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n" % user_input)
return
species_chargen = next(s for s in CHARGEN["species"] if s["key"] == species["key"])
caller.msg("You set your character's species to: %s." % species["title"])
character["age"] = species_chargen["min_age"]
character["origin"] = None
character["species"] = species["key"]
get_input(caller, ">> Enter your character's species.", callback)
def _node_basics_origin(caller):
def callback(caller, prompt, user_input):
character = caller.ndb._menutree.character
origins = filter(lambda o: character["species"] in o["species_restrictions"], CHARGEN["origins"])
origin = next((o for o in origins if o["title"].lower().startswith(user_input.lower())), None)
if not origin:
caller.msg("'%s' is not a valid origin choice. Valid choices: %s"
% (user_input, list_to_string(map(lambda o: o["title"], origins))))
return
caller.msg("You set your character's origin to: %s." % user_input)
character["origin"] = origin["key"]
get_input(caller, ">> Enter your character's origin.", callback)
def _is_attributes_done(caller):
if caller.ndb._menutree.points["attributes"] != 0:
return False, "All attribute points must be allocated."
return True, ""
def _is_basics_done(caller):
character = caller.ndb._menutree.character
name = character["full_name"]
if not name or len(name) < 3:
return False, "Full name must have a value and be longer than 3 characters."
origin = character["origin"]
if not origin:
return False, "Must select an origin."
species_stats = next(s for s in CHARGEN["species"] if s["key"] == character["species"])
age = character["age"]
if age < species_stats["min_start_age"]:
return False, "Age must be equal to or more than %s." % species_stats["min_start_age"]
if age > species_stats["max_start_age"]:
return False, "Age must be equal to or less than %s." % species_stats["max_start_age"]
return True, ""
def _is_skills_done(caller):
return False, ""
def _is_life_done(caller):
return False, ""
def node_skills(caller):
text = """
"""
index = 0
stats = caller.ndb._menutree.character["stats"]
for skill in SkillEnum:
if index % 2 == 0:
text += "\n"
text += ("%s:" % skill.name).ljust(28)
value = stats.get(skill.name, 0)
text += str(value).rjust(9)
if index % 2 == 0:
text += " "
index += 1
    options = (
        {"key": "return", "goto": "node_menu"},
        # Skill allocation is not wired up yet; loop back to this node for now.
        {"key": "set", "goto": "node_skills"}
    )
return text, options
def node_attributes(caller):
text = ""
for attribute in AttributeEnum:
if attribute == AttributeEnum.Psi and not caller.ndb._menutree.character["is_psionic"]:
continue
text += "%s: " % attribute.name
text += "%s\r\n" % caller.ndb._menutree.character["stats"][attribute.name]
text += "\r\n%s points remaining.\r\n" % caller.ndb._menutree.points["attributes"]
text += "\r\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively."
text += "\r\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively."
# options = {"key": "_default", "goto": _node_attributes}
# if caller.ndb._menutree.points["attributes"] == 0:
options = ({"key": "_default", "goto": _node_attributes},
{"key": "return", "goto": "node_menu"})
return text, options
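# Free-text handler for the attributes node: parses "add <n> to <attr>" and
# "sub <n> from <attr>" and hands the adjustment to adjust_attribute().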
def _node_attributes(caller, raw_string):
match = re.match(r"add (\d+) to (\w+)", raw_string)
if match:
return adjust_attribute(caller, match, True)
match = re.match(r"sub (\d+) from (\w+)", raw_string)
if match:
return adjust_attribute(caller, match, False)
    # Neither pattern matched; stay on the attributes node.
    return "node_attributes"
def node_terms(caller):
text = ""
term_count = 1
for term in caller.ndb._menutree.terms:
text += "\r\n* Term %s:" % term_count + " %s" % term.title
term_count += 1
age = caller.ndb._menutree.character["age"] + (4 * caller.ndb._menutree.character["current_term"])
text += "\r\nCurrent Character Age: %s" % age
text += "\r\n\r\nType \"|ychoose <term>|n\" to begin a term."
options = ({"key": "_default", "goto": _node_terms},
{"key": "list choices", "goto": _list_term_choices},
{"key": "finish", "goto": "node_finish"})
return text, options
def _node_terms(caller, raw_string):
match = re.match(r"choose (\w+)", raw_string)
if not match:
error(caller, "I didn't understand that.")
return "node_terms"
term_token = match.group(1).lower()
term = next((x for x in TERMS if x["title"].lower().startswith(term_token)), None)
if not term:
error(caller, "%s is not a valid term. Type \"|ylist choices|n\" to get a list of all available careers.")
return "node_terms"
caller.ndb._menutree.terms.append({
"term": term["title"]
})
return "node_term"
def _list_term_choices(caller):
text = ""
for term in TERMS:
text += "\r\n* %s" % term["title"]
for assignment in term["assignments"]:
text += "\r\n\t- %s: " % assignment["title"]
text += "sample description text"
caller.msg(text)
return "node_terms"
def node_term(caller):
    term_title = caller.ndb._menutree.terms[-1]["term"]
# term = next((x for x in TERMS if x["title"] == term_title), None)
text = "Career: %s" % term_title
text += "\r\nAssignment: Not Set"
text += "\r\nPersonal Advancement: Not Set"
text += "\r\nYears: %s" % caller.ndb._menutree.character["age"]
text += "-%s" % (caller.ndb._menutree.character["age"] + 4)
text += "\r\n\r\nLife Event: |y1 Available|n"
text += "\r\n\r\nType \"|yset Assignment to <assignment>|n\" to choose an assignment."
text += "\r\nType \"|yset Advancement to <option>|n\" to choose a personal advancement."
text += "\r\n\r\nRolling for a life event is optional and may yield positive or negative results. "
text += "Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan."
options = ({"key": "show assignments", "goto": _list_term_assignments},
{"key": "show advancements", "goto": _list_term_advancements},
{"key": "roll life event", "goto": _do_life_event})
return text, options
def _list_term_advancements(caller):
return "node_term"
def _list_term_assignments(caller):
return "node_term"
def _do_life_event(caller):
return "node_term"
def adjust_attribute(caller, match, is_add):
attribute_token = match.group(2).lower()
attribute = next((x for x in AttributeEnum if x.name.lower().startswith(attribute_token)), None)
if not attribute:
error(caller, "%s is not a valid attribute." % match.group(2))
return "node_attributes"
value = int(match.group(1))
if not value or value < 0:
error(caller, "Value to adjust must be a positive number.")
return "node_attributes"
attribute_value = caller.ndb._menutree.character["stats"][attribute.name]
if not is_add and attribute_value - value < 10:
error(caller, attribute.name + " cannot be reduced below 10.")
return "node_attributes"
# calculate cost..
i_value = value
cost = 0
while i_value > 0:
if is_add:
new_value = i_value + attribute_value
else:
new_value = attribute_value - i_value
if new_value <= 12:
cost += 4
elif new_value <= 16:
cost += 2
elif new_value <= 23:
cost += 1
elif new_value <= 26:
cost += 2
elif new_value <= 30:
cost += 4
i_value -= 1
if not is_add:
cost *= -1
if cost > caller.ndb._menutree.points["attributes"]:
deficit = (caller.ndb._menutree.points["attributes"] - cost) * -1
error(caller, "Raising %s" % attribute.name + " costs %s total points," % cost + " %s more points than you have available." % deficit)
return "node_attributes"
# Succeeded the gauntlet. Change their stat.
if is_add:
caller.ndb._menutree.character["stats"][attribute.name] += value
else:
caller.ndb._menutree.character["stats"][attribute.name] -= value
caller.ndb._menutree.points["attributes"] -= cost
msg = "Successfully set %s " % attribute.name + "to %s" % caller.ndb._menutree.character["stats"][attribute.name]
msg += " for %s points." % cost
success(caller, msg)
return "node_attributes"
def node_finish(caller):
text = ""
options = ()
return text, options
def success(caller, msg):
caller.msg("|b<|cSystem|b>|n %s" % msg)
def error(caller, msg):
caller.msg("|y<|rError|y>|n %s" % msg)
|
flexible
|
{
"blob_id": "99eeb039e1a369e450247d10ba22a1aa0b35dae9",
"index": 6875,
"step-1": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\n<mask token>\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or int(user_input) < species[\n 'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\n<mask token>\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\n<mask token>\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\n<mask token>\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' % cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\n<mask token>\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or int(user_input) < species[\n 'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\n<mask token>\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\n<mask token>\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\ndef _list_term_advancements(caller):\n return 'node_term'\n\n\n<mask token>\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' % cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\ndef node_menu(caller):\n name = caller.ndb._menutree.character['full_name']\n if not name:\n name = 'Not Set'\n species = caller.ndb._menutree.character['species']\n origin = caller.ndb._menutree.character['origin']\n if not origin:\n origin = 'Not Set'\n d_b = '|gOk|n' if _is_basics_done(caller)[0] else '|rNo|n'\n d_a = '|gOk|n' if _is_attributes_done(caller)[0] else '|rNo|n'\n d_s = '|gOk|n' if _is_skills_done(caller)[0] else '|rNo|n'\n d_l = '|gOk|n' if _is_life_done(caller)[0] else '|rNo|n'\n text = (\n \"\"\"\n Below are the general details of your character. Use the below commands\n to navigate through chargen steps. Some steps may appear after others are completed.\n \n |wFull Name:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Completed:\n |wBasics:|n %s\n |wAttributes:|n %s\n |wStarting Skills:|n %s\n |wLife path:|n %s \n \"\"\"\n % (name, species, origin, d_b, d_a, d_s, d_l))\n options = {'key': 'basics', 'goto': 'node_basics'}, {'key':\n 'attributes', 'goto': 'node_attributes'}, {'key': 'skills', 'goto':\n 'node_skills'}\n if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0\n ] and _is_skills_done(caller)[0]:\n options.append({'key': 'life path', 'goto': 'node_terms'})\n if _is_life_done(caller)[0]:\n options.append({'key': 'finish', 'goto': 'node_finish'})\n return text, options\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or 
int(user_input) < species[\n 'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\ndef _node_basics_species(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n species = next((s for s in SPECIES if s['title'].lower().startswith\n (user_input.lower())), None)\n if not species:\n caller.msg(\n \"'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n\"\n % user_input)\n return\n species_chargen = next(s for s in CHARGEN['species'] if s['key'] ==\n species['key'])\n caller.msg(\"You set your character's species to: %s.\" % species[\n 'title'])\n character['age'] = species_chargen['min_age']\n character['origin'] = None\n character['species'] = species['key']\n get_input(caller, \">> Enter your character's species.\", callback)\n\n\ndef _node_basics_origin(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n origins = filter(lambda o: character['species'] in o[\n 'species_restrictions'], CHARGEN['origins'])\n origin = next((o for o in origins if o['title'].lower().startswith(\n user_input.lower())), None)\n if not origin:\n caller.msg(\n \"'%s' is not a valid origin choice. Valid choices: %s\" % (\n user_input, list_to_string(map(lambda o: o['title'], origins)))\n )\n return\n caller.msg(\"You set your character's origin to: %s.\" % user_input)\n character['origin'] = origin['key']\n get_input(caller, \">> Enter your character's origin.\", callback)\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\n<mask token>\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. '\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\ndef _list_term_advancements(caller):\n return 'node_term'\n\n\n<mask token>\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' 
% match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' % cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\n<mask token>\n\n\ndef error(caller, msg):\n caller.msg('|y<|rError|y>|n %s' % msg)\n",
"step-4": "<mask token>\n\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {'attributes': 20, 'skills': 20}\n caller.ndb._menutree.character = {'home_planet': None, 'full_name':\n None, 'origin': None, 'stats': {}, 'age': 16, 'is_psionic': False,\n 'current_term': 0, 'species': 'human'}\n caller.ndb._menutree.terms = []\n for attribute in AttributeEnum:\n caller.ndb._menutree.character['stats'][attribute.name] = 20\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n return text, {'key': 'begin', 'goto': 'node_menu'}\n\n\ndef node_menu(caller):\n name = caller.ndb._menutree.character['full_name']\n if not name:\n name = 'Not Set'\n species = caller.ndb._menutree.character['species']\n origin = caller.ndb._menutree.character['origin']\n if not origin:\n origin = 'Not Set'\n d_b = '|gOk|n' if _is_basics_done(caller)[0] else '|rNo|n'\n d_a = '|gOk|n' if _is_attributes_done(caller)[0] else '|rNo|n'\n d_s = '|gOk|n' if _is_skills_done(caller)[0] else '|rNo|n'\n d_l = '|gOk|n' if _is_life_done(caller)[0] else '|rNo|n'\n text = (\n \"\"\"\n Below are the general details of your character. Use the below commands\n to navigate through chargen steps. Some steps may appear after others are completed.\n \n |wFull Name:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Completed:\n |wBasics:|n %s\n |wAttributes:|n %s\n |wStarting Skills:|n %s\n |wLife path:|n %s \n \"\"\"\n % (name, species, origin, d_b, d_a, d_s, d_l))\n options = {'key': 'basics', 'goto': 'node_basics'}, {'key':\n 'attributes', 'goto': 'node_attributes'}, {'key': 'skills', 'goto':\n 'node_skills'}\n if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0\n ] and _is_skills_done(caller)[0]:\n options.append({'key': 'life path', 'goto': 'node_terms'})\n if _is_life_done(caller)[0]:\n options.append({'key': 'finish', 'goto': 'node_finish'})\n return text, options\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name:\n name = 'Not Set'\n species = character['species']\n origin = character['origin']\n if not origin:\n origin = 'Not Set'\n age = character['age']\n text = (\n \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\"\n % (name, age, species, origin))\n options = {'key': 'return', 'goto': 'node_menu'}, {'key': 'full_name',\n 'goto': _node_basics_full_name}, {'key': 'age', 'goto':\n _node_basics_age}, {'key': 'species', 'goto': _node_basics_species}, {\n 'key': 'origin', 'goto': _node_basics_origin}\n return text, options\n\n\ndef _node_basics_full_name(caller):\n\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character['full_name'] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN['species'] if s['key'] == caller.\n ndb._menutree.character['species'])\n if not user_input.is_integer() or 
int(user_input) < species[\n 'min_start_age'] or int(user_input) > species['max_start_age']:\n caller.msg('Age must be a valid number between %s and %s.' % (\n species['min_start_age'], species['max_start_age']))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character['age'] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\ndef _node_basics_species(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n species = next((s for s in SPECIES if s['title'].lower().startswith\n (user_input.lower())), None)\n if not species:\n caller.msg(\n \"'%s' is not a valid species. Valid species: |wHuman|n, and |wAndroid.|n\"\n % user_input)\n return\n species_chargen = next(s for s in CHARGEN['species'] if s['key'] ==\n species['key'])\n caller.msg(\"You set your character's species to: %s.\" % species[\n 'title'])\n character['age'] = species_chargen['min_age']\n character['origin'] = None\n character['species'] = species['key']\n get_input(caller, \">> Enter your character's species.\", callback)\n\n\ndef _node_basics_origin(caller):\n\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n origins = filter(lambda o: character['species'] in o[\n 'species_restrictions'], CHARGEN['origins'])\n origin = next((o for o in origins if o['title'].lower().startswith(\n user_input.lower())), None)\n if not origin:\n caller.msg(\n \"'%s' is not a valid origin choice. Valid choices: %s\" % (\n user_input, list_to_string(map(lambda o: o['title'], origins)))\n )\n return\n caller.msg(\"You set your character's origin to: %s.\" % user_input)\n character['origin'] = origin['key']\n get_input(caller, \">> Enter your character's origin.\", callback)\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points['attributes'] != 0:\n return False, 'All attribute points must be allocated.'\n return True, ''\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character['full_name']\n if not name or len(name) < 3:\n return (False,\n 'Full name must have a value and be longer than 3 characters.')\n origin = character['origin']\n if not origin:\n return False, 'Must select an origin.'\n species_stats = next(s for s in CHARGEN['species'] if s['key'] ==\n character['species'])\n age = character['age']\n if age < species_stats['min_start_age']:\n return False, 'Age must be equal to or more than %s.' % species_stats[\n 'min_start_age']\n if age > species_stats['max_start_age']:\n return False, 'Age must be equal to or less than %s.' 
% species_stats[\n 'max_start_age']\n return True, ''\n\n\ndef _is_skills_done(caller):\n return False, ''\n\n\ndef _is_life_done(caller):\n return False, ''\n\n\n<mask token>\n\n\ndef node_attributes(caller):\n text = ''\n for attribute in AttributeEnum:\n if (attribute == AttributeEnum.Psi and not caller.ndb._menutree.\n character['is_psionic']):\n continue\n text += '%s: ' % attribute.name\n text += '%s\\r\\n' % caller.ndb._menutree.character['stats'][attribute\n .name]\n text += '\\r\\n%s points remaining.\\r\\n' % caller.ndb._menutree.points[\n 'attributes']\n text += (\n '\\r\\nType \"|yadd <number> to <attribute>|n\" to adjust an attribute positively.'\n )\n text += (\n '\\r\\nType \"|ysub <number> from <attribute>|n\" to adjust an attribute negatively.'\n )\n options = {'key': '_default', 'goto': _node_attributes}, {'key':\n 'return', 'goto': 'node_menu'}\n return text, options\n\n\ndef _node_attributes(caller, raw_string):\n match = re.match('add (\\\\d+) to (\\\\w+)', raw_string)\n if match:\n return adjust_attribute(caller, match, True)\n match = re.match('sub (\\\\d+) from (\\\\w+)', raw_string)\n if match:\n return adjust_attribute(caller, match, False)\n if not match:\n return 'node_attributes'\n\n\ndef node_terms(caller):\n text = ''\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += '\\r\\n* Term %s:' % term_count + ' %s' % term.title\n term_count += 1\n age = caller.ndb._menutree.character['age'\n ] + 4 * caller.ndb._menutree.character['current_term']\n text += '\\r\\nCurrent Character Age: %s' % age\n text += '\\r\\n\\r\\nType \"|ychoose <term>|n\" to begin a term.'\n options = {'key': '_default', 'goto': _node_terms}, {'key':\n 'list choices', 'goto': _list_term_choices}, {'key': 'finish',\n 'goto': 'node_finish'}\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match('choose (\\\\w+)', raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return 'node_terms'\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x['title'].lower().startswith(\n term_token)), None)\n if not term:\n error(caller,\n '%s is not a valid term. Type \"|ylist choices|n\" to get a list of all available careers.'\n )\n return 'node_terms'\n caller.ndb._menutree.terms.append({'term': term['title']})\n return 'node_term'\n\n\n<mask token>\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1\n ]['term']\n text = 'Career: %s' % term_title\n text += '\\r\\nAssignment: Not Set'\n text += '\\r\\nPersonal Advancement: Not Set'\n text += '\\r\\nYears: %s' % caller.ndb._menutree.character['age']\n text += '-%s' % (caller.ndb._menutree.character['age'] + 4)\n text += '\\r\\n\\r\\nLife Event: |y1 Available|n'\n text += (\n '\\r\\n\\r\\nType \"|yset Assignment to <assignment>|n\" to choose an assignment.'\n )\n text += (\n '\\r\\nType \"|yset Advancement to <option>|n\" to choose a personal advancement.'\n )\n text += (\n '\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. 
'\n )\n text += (\n \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n )\n options = {'key': 'show assignments', 'goto': _list_term_assignments}, {\n 'key': 'show advancements', 'goto': _list_term_advancements}, {'key':\n 'roll life event', 'goto': _do_life_event}\n return text, options\n\n\ndef _list_term_advancements(caller):\n return 'node_term'\n\n\n<mask token>\n\n\ndef _do_life_event(caller):\n return 'node_term'\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith\n (attribute_token)), None)\n if not attribute:\n error(caller, '%s is not a valid attribute.' % match.group(2))\n return 'node_attributes'\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, 'Value to adjust must be a positive number.')\n return 'node_attributes'\n attribute_value = caller.ndb._menutree.character['stats'][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + ' cannot be reduced below 10.')\n return 'node_attributes'\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n if not is_add:\n cost *= -1\n if cost > caller.ndb._menutree.points['attributes']:\n deficit = (caller.ndb._menutree.points['attributes'] - cost) * -1\n error(caller, 'Raising %s' % attribute.name + \n ' costs %s total points,' % cost + \n ' %s more points than you have available.' % deficit)\n return 'node_attributes'\n if is_add:\n caller.ndb._menutree.character['stats'][attribute.name] += value\n else:\n caller.ndb._menutree.character['stats'][attribute.name] -= value\n caller.ndb._menutree.points['attributes'] -= cost\n msg = ('Successfully set %s ' % attribute.name + 'to %s' % caller.ndb.\n _menutree.character['stats'][attribute.name])\n msg += ' for %s points.' % cost\n success(caller, msg)\n return 'node_attributes'\n\n\ndef node_finish(caller):\n text = ''\n options = ()\n return text, options\n\n\ndef success(caller, msg):\n caller.msg('|b<|cSystem|b>|n %s' % msg)\n\n\ndef error(caller, msg):\n caller.msg('|y<|rError|y>|n %s' % msg)\n",
"step-5": "from world.enums import *\nfrom world.content.species import SPECIES\nfrom world.content.chargen import *\nfrom evennia.utils.evmenu import get_input\nfrom evennia.utils.utils import list_to_string\nimport re\n\ndef start(caller):\n if not caller:\n return\n caller.ndb._menutree.points = {\n \"attributes\": 20,\n \"skills\": 20\n }\n caller.ndb._menutree.character = {\n \"home_planet\": None,\n \"full_name\": None,\n \"origin\": None,\n \"stats\": {},\n \"age\": 16,\n \"is_psionic\": False,\n \"current_term\": 0,\n \"species\": \"human\"\n }\n caller.ndb._menutree.terms = []\n\n for attribute in AttributeEnum:\n caller.ndb._menutree.character[\"stats\"][attribute.name] = 20\n\n text = \"\"\"\n Welcome to Singularity's Character Generator!\n \n Have a paragraph about WTF is going on and some info about our game. Also here are some warnings\n that you *definitely* shouldn't make multiple characters. And also here's some commands to\n help get you more info! TBD!!!\n \n |yPlease do not make multiple characters to game chargen.|n\n \n When you're ready, go ahead and like.. type |ybegin|n to start CharGen.\n \"\"\"\n\n return text, ({\"key\": \"begin\", \"goto\": \"node_menu\"})\n\n\ndef node_menu(caller):\n name = caller.ndb._menutree.character[\"full_name\"]\n if not name:\n name = \"Not Set\"\n species = caller.ndb._menutree.character[\"species\"]\n origin = caller.ndb._menutree.character[\"origin\"]\n if not origin:\n origin = \"Not Set\"\n\n d_b = \"|gOk|n\" if _is_basics_done(caller)[0] else \"|rNo|n\"\n d_a = \"|gOk|n\" if _is_attributes_done(caller)[0] else \"|rNo|n\"\n d_s = \"|gOk|n\" if _is_skills_done(caller)[0] else \"|rNo|n\"\n d_l = \"|gOk|n\" if _is_life_done(caller)[0] else \"|rNo|n\"\n\n text = \"\"\"\n Below are the general details of your character. Use the below commands\n to navigate through chargen steps. 
Some steps may appear after others are completed.\n \n |wFull Name:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Completed:\n |wBasics:|n %s\n |wAttributes:|n %s\n |wStarting Skills:|n %s\n |wLife path:|n %s \n \"\"\" % (name, species, origin, d_b, d_a, d_s, d_l)\n\n options = (\n {\"key\": \"basics\", \"goto\": \"node_basics\"},\n {\"key\": \"attributes\", \"goto\": \"node_attributes\"},\n {\"key\": \"skills\", \"goto\": \"node_skills\"}\n )\n\n if _is_basics_done(caller)[0] and _is_attributes_done(caller)[0] and _is_skills_done(caller)[0]:\n options.append({\"key\": \"life path\", \"goto\": \"node_terms\"})\n if _is_life_done(caller)[0]:\n options.append({\"key\": \"finish\", \"goto\": \"node_finish\"})\n\n return text, options\n\n\ndef node_basics(caller):\n character = caller.ndb._menutree.character\n name = character[\"full_name\"]\n if not name:\n name = \"Not Set\"\n species = character[\"species\"]\n origin = character[\"origin\"]\n if not origin:\n origin = \"Not Set\"\n age = character[\"age\"]\n text = \"\"\"\n |wFull Name:|n %s\n |wAdolescent Age:|n %s\n |wSpecies:|n %s\n |wOrigin:|n %s\n \n Type |yhelp <command>|n to get info on available choices.\n \"\"\" % (name, age, species, origin)\n\n options = (\n {\"key\": \"return\", \"goto\": \"node_menu\"},\n {\"key\": \"full_name\", \"goto\": _node_basics_full_name},\n {\"key\": \"age\", \"goto\": _node_basics_age},\n {\"key\": \"species\", \"goto\": _node_basics_species},\n {\"key\": \"origin\", \"goto\": _node_basics_origin}\n )\n\n return text, options\n\n\ndef _node_basics_full_name(caller):\n def callback(caller, prompt, user_input):\n caller.msg(\"You set your character's full name to: %s.\" % user_input)\n caller.ndb._menutree.character[\"full_name\"] = user_input\n get_input(caller, \">> Enter your character's full name.\", callback)\n\n\ndef _node_basics_age(caller):\n def callback(caller, prompt, user_input):\n species = next(s for s in CHARGEN[\"species\"] if s[\"key\"] == caller.ndb._menutree.character[\"species\"])\n if not user_input.is_integer() \\\n or int(user_input) < species[\"min_start_age\"] \\\n or int(user_input) > species[\"max_start_age\"]:\n caller.msg(\"Age must be a valid number between %s and %s.\"\n % (species[\"min_start_age\"], species[\"max_start_age\"]))\n return\n caller.msg(\"You set your character's age to: %s.\" % user_input)\n caller.ndb._menutree.character[\"age\"] = int(user_input)\n get_input(caller, \">> Enter your character's age.\", callback)\n\n\ndef _node_basics_species(caller):\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n species = next((s for s in SPECIES if s[\"title\"].lower().startswith(user_input.lower())), None)\n if not species:\n caller.msg(\"'%s' is not a valid species. 
Valid species: |wHuman|n, and |wAndroid.|n\" % user_input)\n return\n species_chargen = next(s for s in CHARGEN[\"species\"] if s[\"key\"] == species[\"key\"])\n caller.msg(\"You set your character's species to: %s.\" % species[\"title\"])\n character[\"age\"] = species_chargen[\"min_age\"]\n character[\"origin\"] = None\n character[\"species\"] = species[\"key\"]\n get_input(caller, \">> Enter your character's species.\", callback)\n\n\ndef _node_basics_origin(caller):\n def callback(caller, prompt, user_input):\n character = caller.ndb._menutree.character\n origins = filter(lambda o: character[\"species\"] in o[\"species_restrictions\"], CHARGEN[\"origins\"])\n origin = next((o for o in origins if o[\"title\"].lower().startswith(user_input.lower())), None)\n if not origin:\n caller.msg(\"'%s' is not a valid origin choice. Valid choices: %s\"\n % (user_input, list_to_string(map(lambda o: o[\"title\"], origins))))\n return\n caller.msg(\"You set your character's origin to: %s.\" % user_input)\n character[\"origin\"] = origin[\"key\"]\n get_input(caller, \">> Enter your character's origin.\", callback)\n\n\ndef _is_attributes_done(caller):\n if caller.ndb._menutree.points[\"attributes\"] != 0:\n return False, \"All attribute points must be allocated.\"\n return True, \"\"\n\n\ndef _is_basics_done(caller):\n character = caller.ndb._menutree.character\n name = character[\"full_name\"]\n if not name or len(name) < 3:\n return False, \"Full name must have a value and be longer than 3 characters.\"\n origin = character[\"origin\"]\n if not origin:\n return False, \"Must select an origin.\"\n species_stats = next(s for s in CHARGEN[\"species\"] if s[\"key\"] == character[\"species\"])\n age = character[\"age\"]\n if age < species_stats[\"min_start_age\"]:\n return False, \"Age must be equal to or more than %s.\" % species_stats[\"min_start_age\"]\n if age > species_stats[\"max_start_age\"]:\n return False, \"Age must be equal to or less than %s.\" % species_stats[\"max_start_age\"]\n return True, \"\"\n\n\ndef _is_skills_done(caller):\n return False, \"\"\n\n\ndef _is_life_done(caller):\n return False, \"\"\n\n\ndef node_skills(caller):\n text = \"\"\"\n \"\"\"\n\n index = 0\n stats = caller.ndb._menutree.character[\"stats\"]\n for skill in SkillEnum:\n if index % 2 == 0:\n text += \"\\n\"\n\n text += (\"%s:\" % skill.name).ljust(28)\n value = stats.get(skill.name, 0)\n text += str(value).rjust(9)\n if index % 2 == 0:\n text += \" \"\n index += 1\n\n options = (\n {\"key\": \"return\", \"goto\": \"node_menu\"},\n {\"key\": \"set\", \"goto\": \"\"}\n )\n\n return text, options\n\n\ndef node_attributes(caller):\n text = \"\"\n for attribute in AttributeEnum:\n if attribute == AttributeEnum.Psi and not caller.ndb._menutree.character[\"is_psionic\"]:\n continue\n text += \"%s: \" % attribute.name\n text += \"%s\\r\\n\" % caller.ndb._menutree.character[\"stats\"][attribute.name]\n text += \"\\r\\n%s points remaining.\\r\\n\" % caller.ndb._menutree.points[\"attributes\"]\n text += \"\\r\\nType \\\"|yadd <number> to <attribute>|n\\\" to adjust an attribute positively.\"\n text += \"\\r\\nType \\\"|ysub <number> from <attribute>|n\\\" to adjust an attribute negatively.\"\n\n # options = {\"key\": \"_default\", \"goto\": _node_attributes}\n # if caller.ndb._menutree.points[\"attributes\"] == 0:\n options = ({\"key\": \"_default\", \"goto\": _node_attributes},\n {\"key\": \"return\", \"goto\": \"node_menu\"})\n return text, options\n\n\ndef _node_attributes(caller, raw_string):\n match = re.match(r\"add 
(\\d+) to (\\w+)\", raw_string)\n if match:\n return adjust_attribute(caller, match, True)\n match = re.match(r\"sub (\\d+) from (\\w+)\", raw_string)\n if match:\n return adjust_attribute(caller, match, False)\n\n if not match:\n return \"node_attributes\"\n\n\ndef node_terms(caller):\n text = \"\"\n term_count = 1\n for term in caller.ndb._menutree.terms:\n text += \"\\r\\n* Term %s:\" % term_count + \" %s\" % term.title\n term_count += 1\n\n age = caller.ndb._menutree.character[\"age\"] + (4 * caller.ndb._menutree.character[\"current_term\"])\n text += \"\\r\\nCurrent Character Age: %s\" % age\n text += \"\\r\\n\\r\\nType \\\"|ychoose <term>|n\\\" to begin a term.\"\n\n options = ({\"key\": \"_default\", \"goto\": _node_terms},\n {\"key\": \"list choices\", \"goto\": _list_term_choices},\n {\"key\": \"finish\", \"goto\": \"node_finish\"})\n return text, options\n\n\ndef _node_terms(caller, raw_string):\n match = re.match(r\"choose (\\w+)\", raw_string)\n if not match:\n error(caller, \"I didn't understand that.\")\n return \"node_terms\"\n\n term_token = match.group(1).lower()\n term = next((x for x in TERMS if x[\"title\"].lower().startswith(term_token)), None)\n if not term:\n error(caller, \"%s is not a valid term. Type \\\"|ylist choices|n\\\" to get a list of all available careers.\")\n return \"node_terms\"\n\n caller.ndb._menutree.terms.append({\n \"term\": term[\"title\"]\n })\n return \"node_term\"\n\n\ndef _list_term_choices(caller):\n text = \"\"\n for term in TERMS:\n text += \"\\r\\n* %s\" % term[\"title\"]\n for assignment in term[\"assignments\"]:\n text += \"\\r\\n\\t- %s: \" % assignment[\"title\"]\n text += \"sample description text\"\n\n caller.msg(text)\n return \"node_terms\"\n\n\ndef node_term(caller):\n term_title = caller.ndb._menutree.terms[len(caller.ndb._menutree.terms) - 1][\"term\"]\n # term = next((x for x in TERMS if x[\"title\"] == term_title), None)\n text = \"Career: %s\" % term_title\n text += \"\\r\\nAssignment: Not Set\"\n text += \"\\r\\nPersonal Advancement: Not Set\"\n text += \"\\r\\nYears: %s\" % caller.ndb._menutree.character[\"age\"]\n text += \"-%s\" % (caller.ndb._menutree.character[\"age\"] + 4)\n text += \"\\r\\n\\r\\nLife Event: |y1 Available|n\"\n\n text += \"\\r\\n\\r\\nType \\\"|yset Assignment to <assignment>|n\\\" to choose an assignment.\"\n text += \"\\r\\nType \\\"|yset Advancement to <option>|n\\\" to choose a personal advancement.\"\n text += \"\\r\\n\\r\\nRolling for a life event is optional and may yield positive or negative results. 
\"\n text += \"Once you've chosen to roll a life event, the result cannot be rerolled or changed except through mulligan.\"\n\n options = ({\"key\": \"show assignments\", \"goto\": _list_term_assignments},\n {\"key\": \"show advancements\", \"goto\": _list_term_advancements},\n {\"key\": \"roll life event\", \"goto\": _do_life_event})\n return text, options\n\n\ndef _list_term_advancements(caller):\n return \"node_term\"\n\n\ndef _list_term_assignments(caller):\n return \"node_term\"\n\n\ndef _do_life_event(caller):\n return \"node_term\"\n\n\ndef adjust_attribute(caller, match, is_add):\n attribute_token = match.group(2).lower()\n attribute = next((x for x in AttributeEnum if x.name.lower().startswith(attribute_token)), None)\n if not attribute:\n error(caller, \"%s is not a valid attribute.\" % match.group(2))\n return \"node_attributes\"\n value = int(match.group(1))\n if not value or value < 0:\n error(caller, \"Value to adjust must be a positive number.\")\n return \"node_attributes\"\n\n attribute_value = caller.ndb._menutree.character[\"stats\"][attribute.name]\n if not is_add and attribute_value - value < 10:\n error(caller, attribute.name + \" cannot be reduced below 10.\")\n return \"node_attributes\"\n\n # calculate cost..\n i_value = value\n cost = 0\n while i_value > 0:\n if is_add:\n new_value = i_value + attribute_value\n else:\n new_value = attribute_value - i_value\n\n if new_value <= 12:\n cost += 4\n elif new_value <= 16:\n cost += 2\n elif new_value <= 23:\n cost += 1\n elif new_value <= 26:\n cost += 2\n elif new_value <= 30:\n cost += 4\n i_value -= 1\n\n if not is_add:\n cost *= -1\n\n if cost > caller.ndb._menutree.points[\"attributes\"]:\n deficit = (caller.ndb._menutree.points[\"attributes\"] - cost) * -1\n error(caller, \"Raising %s\" % attribute.name + \" costs %s total points,\" % cost + \" %s more points than you have available.\" % deficit)\n return \"node_attributes\"\n\n # Succeeded the gauntlet. Change their stat.\n if is_add:\n caller.ndb._menutree.character[\"stats\"][attribute.name] += value\n else:\n caller.ndb._menutree.character[\"stats\"][attribute.name] -= value\n caller.ndb._menutree.points[\"attributes\"] -= cost\n\n msg = \"Successfully set %s \" % attribute.name + \"to %s\" % caller.ndb._menutree.character[\"stats\"][attribute.name]\n msg += \" for %s points.\" % cost\n success(caller, msg)\n return \"node_attributes\"\n\n\ndef node_finish(caller):\n text = \"\"\n options = ()\n\n return text, options\n\n\ndef success(caller, msg):\n caller.msg(\"|b<|cSystem|b>|n %s\" % msg)\n\n\ndef error(caller, msg):\n caller.msg(\"|y<|rError|y>|n %s\" % msg)\n",
"step-ids": [
14,
15,
19,
22,
27
]
}
|
[
14,
15,
19,
22,
27
] |
<|reserved_special_token_0|>
class SerialTester:
def write(self, line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
ser.write(':Sz ' + str(az_int) + '*00:00#')
ser.write(':Sa +' + str(alt_int) + '*00:00#')
ser.write(':MS#')
ser.read(64)
def park(self):
if self.parked:
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if self.parked:
self.parked = False
if self.azimuth - az > unwind_threshold:
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
<|reserved_special_token_0|>
def setup_serial(port, baud):
ser = serial.Serial(port, baud)
print('Port used:' + ser.name)
return ser
<|reserved_special_token_0|>
def read_message(port):
while True:
try:
line = port.readline().decode('ascii').replace('\r', '').replace(
'\n', '')
except:
line = ''
if len(line) > 0 and line[0] == '$':
return line
<|reserved_special_token_0|>
def display_stats(orient, position, obs):
try:
print('\n' * 65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(
""" _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\\:::.......:::/
':::::::::::'
`'""\"'`
"""
)
print('Time: {}\n'.format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=
position.is_fixed(), lat=obs.lat, lon=obs.lon))
print(position.unparsed)
print('Sensor\n===')
print(
'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\n---'
.format(heading=orient.get_heading(), pitch=orient.get_pitch(),
roll=orient.get_roll()))
print(
'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print(
'\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'
.format(magvar=magvar, true_heading=(orient.get_heading() +
magvar + 720) % 360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing
=position.get_bearing(), speed=position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
<|reserved_special_token_0|>
class myThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == 'q':
break
val = chr(ord(val) + 1)
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SerialTester:
def write(self, line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
ser.write(':Sz ' + str(az_int) + '*00:00#')
ser.write(':Sa +' + str(alt_int) + '*00:00#')
ser.write(':MS#')
ser.read(64)
def park(self):
if self.parked:
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if self.parked:
self.parked = False
if self.azimuth - az > unwind_threshold:
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
<|reserved_special_token_0|>
def setup_serial(port, baud):
ser = serial.Serial(port, baud)
print('Port used:' + ser.name)
return ser
def setup_satellite():
icof2 = ephem.readtle('ICO F2',
'1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'
,
'2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'
)
return icof2
def to_degrees(radians):
return radians / ephem.degree
<|reserved_special_token_0|>
def read_message(port):
while True:
try:
line = port.readline().decode('ascii').replace('\r', '').replace(
'\n', '')
except:
line = ''
if len(line) > 0 and line[0] == '$':
return line
<|reserved_special_token_0|>
def arduino_tester():
ard = setup_serial(arduino_port, 115200)
icof2 = setup_satellite()
while True:
try:
line = read_nmea(ard)
home = reset()
home, heading = update(nmea.nmea(line))
print(home.lat)
print(home.lon)
print(home.date)
print(heading)
except:
break
def display_stats(orient, position, obs):
try:
print('\n' * 65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(
""" _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\\:::.......:::/
':::::::::::'
`'""\"'`
"""
)
print('Time: {}\n'.format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=
position.is_fixed(), lat=obs.lat, lon=obs.lon))
print(position.unparsed)
print('Sensor\n===')
print(
'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\n---'
.format(heading=orient.get_heading(), pitch=orient.get_pitch(),
roll=orient.get_roll()))
print(
'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print(
'\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'
.format(magvar=magvar, true_heading=(orient.get_heading() +
magvar + 720) % 360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing
=position.get_bearing(), speed=position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
<|reserved_special_token_0|>
class myThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == 'q':
break
val = chr(ord(val) + 1)
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SerialTester:
def write(self, line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
ser.write(':Sz ' + str(az_int) + '*00:00#')
ser.write(':Sa +' + str(alt_int) + '*00:00#')
ser.write(':MS#')
ser.read(64)
def park(self):
if self.parked:
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if self.parked:
self.parked = False
if self.azimuth - az > unwind_threshold:
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
<|reserved_special_token_0|>
def update_gps(gprmc, obs):
obsc = obs.copy()
try:
if gprmc.is_fixed() and gprmc.checksum():
datetime = gprmc.get_date() + ' ' + gprmc.get_time()
obsc.date = datetime
obsc.lat = str(gprmc.get_lat())
last_lat = str(gprmc.get_lat())
obsc.lon = str(gprmc.get_lon())
last_lon = str(gprmc.get_lon())
return obsc
except:
return obs
def setup_serial(port, baud):
ser = serial.Serial(port, baud)
print('Port used:' + ser.name)
return ser
def setup_satellite():
icof2 = ephem.readtle('ICO F2',
'1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'
,
'2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'
)
return icof2
def to_degrees(radians):
return radians / ephem.degree
def get_sat_position(icof2, home):
icof2.compute(home)
icof2_az = to_degrees(icof2.az)
icof2_alt = to_degrees(icof2.alt)
print(
'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %
(icof2_az, icof2_alt))
return icof2_az, icof2_alt
def read_message(port):
while True:
try:
line = port.readline().decode('ascii').replace('\r', '').replace(
'\n', '')
except:
line = ''
if len(line) > 0 and line[0] == '$':
return line
def nmea_tester(sentence):
mes = nmea.nmea(sentence)
print('Checksum: ')
print(mes.checksum())
print('Reformatted Date & Time: ')
print(mes.get_date())
print(mes.get_time())
print('Lat, Lon: ')
print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))
print('Heading, MagVar')
print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))
def arduino_tester():
ard = setup_serial(arduino_port, 115200)
icof2 = setup_satellite()
while True:
try:
line = read_nmea(ard)
home = reset()
home, heading = update(nmea.nmea(line))
print(home.lat)
print(home.lon)
print(home.date)
print(heading)
except:
break
def display_stats(orient, position, obs):
try:
print('\n' * 65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(
""" _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\\:::.......:::/
':::::::::::'
`'""\"'`
"""
)
print('Time: {}\n'.format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=
position.is_fixed(), lat=obs.lat, lon=obs.lon))
print(position.unparsed)
print('Sensor\n===')
print(
'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\n---'
.format(heading=orient.get_heading(), pitch=orient.get_pitch(),
roll=orient.get_roll()))
print(
'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print(
'\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'
.format(magvar=magvar, true_heading=(orient.get_heading() +
magvar + 720) % 360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing
=position.get_bearing(), speed=position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
<|reserved_special_token_0|>
class myThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == 'q':
break
val = chr(ord(val) + 1)
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import time
import ephem
import serial
import nmea
import orientation
import sys
import threading
from geomag import geomag
initial_az = 180
initial_alt = 90
min_elevation = 10.0
sleep_time = 1.0
unwind_threshold = 180
sleep_on_unwind = 45.0
last_lon = '-88.787'
last_lat = '41.355'
last_heading = 0.0
mount_port = '/dev/ttyUSB0'
arduino_port = '/dev/ttyACM0'
class SerialTester:
def write(self, line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
ser.write(':Sz ' + str(az_int) + '*00:00#')
ser.write(':Sa +' + str(alt_int) + '*00:00#')
ser.write(':MS#')
ser.read(64)
def park(self):
if self.parked:
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if self.parked:
self.parked = False
if self.azimuth - az > unwind_threshold:
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
def reset():
obs = ephem.Observer()
obs.date = ephem.now()
obs.lon = last_lon
obs.lat = last_lat
obs.elevation = 0.0
return obs
def update_gps(gprmc, obs):
obsc = obs.copy()
try:
if gprmc.is_fixed() and gprmc.checksum():
datetime = gprmc.get_date() + ' ' + gprmc.get_time()
obsc.date = datetime
obsc.lat = str(gprmc.get_lat())
last_lat = str(gprmc.get_lat())
obsc.lon = str(gprmc.get_lon())
last_lon = str(gprmc.get_lon())
return obsc
except:
return obs
def setup_serial(port, baud):
ser = serial.Serial(port, baud)
print('Port used:' + ser.name)
return ser
def setup_satellite():
icof2 = ephem.readtle('ICO F2',
'1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'
,
'2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'
)
return icof2
def to_degrees(radians):
return radians / ephem.degree
def get_sat_position(icof2, home):
icof2.compute(home)
icof2_az = to_degrees(icof2.az)
icof2_alt = to_degrees(icof2.alt)
print(
'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %
(icof2_az, icof2_alt))
return icof2_az, icof2_alt
def read_message(port):
while True:
try:
line = port.readline().decode('ascii').replace('\r', '').replace(
'\n', '')
except:
line = ''
if len(line) > 0 and line[0] == '$':
return line
def nmea_tester(sentence):
mes = nmea.nmea(sentence)
print('Checksum: ')
print(mes.checksum())
print('Reformatted Date & Time: ')
print(mes.get_date())
print(mes.get_time())
print('Lat, Lon: ')
print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))
print('Heading, MagVar')
print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))
def arduino_tester():
ard = setup_serial(arduino_port, 115200)
icof2 = setup_satellite()
while True:
try:
line = read_nmea(ard)
home = reset()
home, heading = update(nmea.nmea(line))
print(home.lat)
print(home.lon)
print(home.date)
print(heading)
except:
break
def display_stats(orient, position, obs):
try:
print('\n' * 65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(
""" _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\\:::.......:::/
':::::::::::'
`'""\"'`
"""
)
print('Time: {}\n'.format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=
position.is_fixed(), lat=obs.lat, lon=obs.lon))
print(position.unparsed)
print('Sensor\n===')
print(
'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\n---'
.format(heading=orient.get_heading(), pitch=orient.get_pitch(),
roll=orient.get_roll()))
print(
'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print(
'\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'
.format(magvar=magvar, true_heading=(orient.get_heading() +
magvar + 720) % 360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing
=position.get_bearing(), speed=position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
home = reset()
ard = setup_serial(arduino_port, 115200)
counter = time.time()
f = open('logs/log_' + str(float(ephem.now())) + '.csv', 'w')
f.write('Epoch Time,Speed,Sensor,GPS,Waypoint\n')
orient = orientation.orientation('$IMU,0,0,0,0,0,0,0,0,0')
position = nmea.nmea('$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
magvar = get_magnetic_var(float(last_lat), float(last_lon))
class myThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == 'q':
break
val = chr(ord(val) + 1)
pass
thread1 = myThread()
thread1.start()
while True:
mes = read_message(ard)
if mes[:2] == '$G':
try:
position = nmea.nmea(mes)
except:
pass
elif mes[:2] == '$I':
try:
orient = orientation.orientation(mes)
except:
pass
home = update_gps(position, home)
home.date = ephem.now()
magvar = get_magnetic_var(float(last_lat), float(last_lon))
display_stats(orient, position, home)
print(val)
if time.time() - counter >= 1.0:
counter = time.time()
try:
f.write(str(ephem.now()) + ',')
f.write(str(position.get_speed()) + ',')
f.write(str(orient.get_heading()) + ',')
f.write(str(position.get_bearing()) + ',')
f.write(val + '\n')
except:
f.write('x\n')
if ii == 'q':
f.close()
break
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import time
import ephem
import serial
import nmea
import orientation
import sys
import threading
from geomag import geomag
#Constants
initial_az = 180
initial_alt = 90
min_elevation = 10.0
sleep_time = 1.0
unwind_threshold = 180
sleep_on_unwind = 45.0
last_lon = '-88.787'
last_lat = '41.355'
last_heading = 0.0
mount_port = '/dev/ttyUSB0'
arduino_port = '/dev/ttyACM0'
class SerialTester:
def write(self,line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
ser.write(":Sz " + str(az_int) + "*00:00#")
ser.write(":Sa +" + str(alt_int) + "*00:00#")
ser.write(":MS#")
ser.read(64)
def park(self):
if (self.parked):
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if (self.parked):
self.parked = False
# Unwrap Cable if Azimuth will cross through True North
# In the above case, Set Azimuth to 180 Degrees, then pick up
# normal tracking
# Then sleep 45 seconds to give the positioner time to
# reposition
if ((self.azimuth - az) > unwind_threshold):
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
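# Usage sketch (assumptions: the rotator understands the Meade/LX200-style ':Sz'/':Sa'/':MS#'
# commands that set_position sends, and a module-level `ser` is opened first -- the original
# never initialises it; under Python 3 + pyserial the command strings would also need to be
# .encode()d before writing):
#   ser = setup_serial(mount_port, 9600)   # baud rate is an assumption
#   antenna = Antenna()
#   antenna.move(200.0, 45.0)              # leaves the parked state and slews the mount
#   antenna.park()                         # returns to initial_az / initial_alt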
def reset():
obs = ephem.Observer()
#Set LAT/LON Coordinates to IMSA's location
obs.date = ephem.now()
obs.lon = last_lon
obs.lat = last_lat
obs.elevation = 0.0
return obs
def update_gps(gprmc, obs):
obsc = obs.copy()
try:
if gprmc.is_fixed() and gprmc.checksum():
datetime = gprmc.get_date() + " " + gprmc.get_time()
obsc.date = datetime
obsc.lat = str(gprmc.get_lat())
last_lat = str(gprmc.get_lat())
obsc.lon = str(gprmc.get_lon())
last_lon = str(gprmc.get_lon())
return obsc
except:
return obs
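# Example call (assumption: nmea.nmea accepts a standard GPRMC sentence, e.g. the textbook
# '$GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A'):
#   home = update_gps(nmea.nmea(sentence), home)
# On a valid, checksummed fix the returned observer carries the new date/lat/lon; on any
# failure the previous observer comes back unchanged. Note that last_lat/last_lon are plain
# local assignments here, so without a `global` declaration the module-level fallbacks
# never actually update.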
def setup_serial(port, baud):
# Set Serial Port - USB0
ser = serial.Serial(port, baud)
print("Port used:" + ser.name)
return ser
# return SerialTester()
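# (The commented-out return above is presumably the bench-test path: returning SerialTester()
# instead of a real port echoes every mount command to stdout with no hardware attached.)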
def setup_satellite():
# Read in TLE for target satellite ICO F2
icof2 = ephem.readtle('ICO F2',
'1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997',
'2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058')
return icof2
def to_degrees(radians):
return radians / ephem.degree
def get_sat_position(icof2, home):
icof2.compute(home)
icof2_az = to_degrees(icof2.az)
icof2_alt = to_degrees(icof2.alt)
print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt))
return icof2_az, icof2_alt
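# Single look-up sketch (assumed flow, no hardware needed):
#   home = reset()                     # observer at the stored fallback lat/lon, time = now
#   icof2 = setup_satellite()
#   az, alt = get_sat_position(icof2, home)
#   if alt >= min_elevation: ...       # only passes above 10 degrees are worth tracking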
def read_message(port):
while True:
try:
line = port.readline().decode("ascii").replace('\r', '').replace('\n', '')
except:
line = ""
if len(line) > 0 and line[0] == "$":
return line
def nmea_tester(sentence):
mes = nmea.nmea(sentence)
print("Checksum: ")
print(mes.checksum())
print("Reformatted Date & Time: ")
print(mes.get_date())
print(mes.get_time())
print("Lat, Lon: ")
print(str(mes.get_lat()) + ", " + str(mes.get_lon()))
print("Heading, MagVar")
print(str(mes.get_magnetic_heading()) + ", " + str(mes.get_magnetic_var()))
def arduino_tester():
ard = setup_serial(arduino_port, 115200)
icof2 = setup_satellite()
while True:
try:
            line = read_message(ard)  # read_nmea()/update() are undefined; use this module's helpers
            home = reset()
            position = nmea.nmea(line)
            home = update_gps(position, home)
            heading = position.get_magnetic_heading()
print(home.lat)
print(home.lon)
print(home.date)
print(heading)
except:
break
def display_stats(orient, position, obs):
try:
print("\n"*65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(''' _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\:::.......:::/
':::::::::::'
`'"""'`\n\n''')
print("Time: {}\n".format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'
.format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon))
print(position.unparsed)
print("Sensor\n===")
print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\
'Roll: {roll:7.2f}\n---'.format(heading = orient.get_heading(),
pitch = orient.get_pitch(),
roll = orient.get_roll()))
print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\
' Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print("\nMagnetic Declination: {magvar:7.2f}, "
"Adjusted Heading: {true_heading:7.2f}"
.format(magvar = magvar,
true_heading= (orient.get_heading() +
magvar+720)%360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'
.format(bearing = position.get_bearing(),
speed = position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
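# Worked example of the declination correction display_stats applies (numbers illustrative):
# with a sensor heading of 350.0 deg and a declination of -3.5 deg,
# (350.0 + -3.5 + 720) % 360 == 346.5, i.e. the magnetometer heading re-referenced to true
# north; the +720 only keeps the sum positive before the modulo.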
home = reset()
ard = setup_serial(arduino_port, 115200)
counter = time.time()
f = open("logs/log_"+str(float(ephem.now()))+".csv", 'w')
f.write("Epoch Time,Speed,Sensor,GPS,Waypoint\n")
orient = orientation.orientation("$IMU,0,0,0,0,0,0,0,0,0")
position = nmea.nmea("$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
magvar = get_magnetic_var(float(last_lat), float(last_lon))
class myThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == "q":
break
val = chr(ord(val) + 1)
pass
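# The thread above is the waypoint marker: each press of Enter advances `val` one letter
# ('@' -> 'A' -> 'B' ...), which the main loop writes into the CSV's Waypoint column once a
# second, and typing 'q' stops both the thread and the logging loop.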
thread1 = myThread()
thread1.start()
while True:
    mes = read_message(ard)
if mes[:2] == "$G":
try:
position = nmea.nmea(mes)
except:
pass
elif mes[:2] == "$I":
try:
orient = orientation.orientation(mes)
except:
pass
# home.date = "2016-06-28 12:00:00"
# Operate the antenna if the satellite's elevation is greater than 10
# degrees
# If the elevation IS above 10 degrees and the antenna is parked, then
# unlatch the park_latch variable
home = update_gps(position, home)
home.date = ephem.now()
magvar = get_magnetic_var(float(last_lat), float(last_lon))
display_stats(orient, position, home)
print(val)
if time.time() - counter >= 1.0:
counter = time.time()
try:
f.write(str(ephem.now())+",")
f.write(str(position.get_speed())+",")
f.write(str(orient.get_heading())+",")
f.write(str(position.get_bearing())+",")
f.write(val+"\n")
except:
f.write("x\n")
if ii == "q":
f.close()
break
''' icof2_az, icof2_alt = get_sat_position(icof2, home)
if (icof2_alt >= min_elevation):
antenna.set_position(icof2_az - heading, icof2_alt)
else:
antenna.park()'''
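# Sketch of re-enabling the tracking block above inside the main loop (assumptions:
# `antenna = Antenna()` and `icof2 = setup_satellite()` are created before the loop, `ser`
# is opened for the mount, and the IMU heading corrected by magvar stands in for the
# `heading` the dead code expected):
#   icof2_az, icof2_alt = get_sat_position(icof2, home)
#   if icof2_alt >= min_elevation:
#       true_heading = (orient.get_heading() + magvar + 720) % 360
#       antenna.move((icof2_az - true_heading + 360) % 360, icof2_alt)
#   else:
#       antenna.park()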
|
flexible
|
{
"blob_id": "468b5bd8d7b045ca8dd46c76a1829fc499e16950",
"index": 5756,
"step-1": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\n<mask token>\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\n<mask token>\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\n<mask token>\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\n<mask token>\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + ' ' + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print(\n 'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %\n (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print('Checksum: ')\n print(mes.checksum())\n print('Reformatted Date & Time: ')\n print(mes.get_date())\n print(mes.get_time())\n print('Lat, Lon: ')\n print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))\n print('Heading, MagVar')\n print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n",
"step-4": "import time\nimport ephem\nimport serial\nimport nmea\nimport orientation\nimport sys\nimport threading\nfrom geomag import geomag\ninitial_az = 180\ninitial_alt = 90\nmin_elevation = 10.0\nsleep_time = 1.0\nunwind_threshold = 180\nsleep_on_unwind = 45.0\nlast_lon = '-88.787'\nlast_lat = '41.355'\nlast_heading = 0.0\nmount_port = '/dev/ttyUSB0'\narduino_port = '/dev/ttyACM0'\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\ndef reset():\n obs = ephem.Observer()\n obs.date = ephem.now()\n obs.lon = last_lon\n obs.lat = last_lat\n obs.elevation = 0.0\n return obs\n\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + ' ' + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print(\n 'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %\n (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print('Checksum: ')\n print(mes.checksum())\n print('Reformatted Date & Time: ')\n print(mes.get_date())\n print(mes.get_time())\n print('Lat, Lon: ')\n print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))\n print('Heading, MagVar')\n print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\nhome = reset()\nard = setup_serial(arduino_port, 115200)\ncounter = time.time()\nf = open('logs/log_' + str(float(ephem.now())) + '.csv', 'w')\nf.write('Epoch Time,Speed,Sensor,GPS,Waypoint\\n')\norient = orientation.orientation('$IMU,0,0,0,0,0,0,0,0,0')\nposition = nmea.nmea('$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\nmagvar = get_magnetic_var(float(last_lat), float(last_lon))\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\nthread1 = myThread()\nthread1.start()\nwhile True:\n mes = read_message(ard)\n if mes[:2] == '$G':\n try:\n position = nmea.nmea(mes)\n except:\n pass\n elif mes[:2] == '$I':\n try:\n orient = orientation.orientation(mes)\n except:\n pass\n home = update_gps(position, home)\n home.date = ephem.now()\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n display_stats(orient, position, home)\n print(val)\n if time.time() - counter >= 1.0:\n counter = time.time()\n try:\n f.write(str(ephem.now()) + ',')\n f.write(str(position.get_speed()) + ',')\n f.write(str(orient.get_heading()) + ',')\n f.write(str(position.get_bearing()) + ',')\n f.write(val + '\\n')\n except:\n f.write('x\\n')\n if ii == 'q':\n f.close()\n break\n<mask token>\n",
"step-5": "import time\nimport ephem\nimport serial\nimport nmea\nimport orientation\nimport sys\nimport threading\nfrom geomag import geomag\n\n#Constants\ninitial_az = 180\ninitial_alt = 90\nmin_elevation = 10.0\nsleep_time = 1.0\nunwind_threshold = 180\nsleep_on_unwind = 45.0\n\nlast_lon = '-88.787'\nlast_lat = '41.355'\nlast_heading = 0.0\n\nmount_port = '/dev/ttyUSB0'\narduino_port = '/dev/ttyACM0'\n\nclass SerialTester:\n def write(self,line):\n print(line)\n\n def read(self, num):\n return\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(\":Sz \" + str(az_int) + \"*00:00#\")\n ser.write(\":Sa +\" + str(alt_int) + \"*00:00#\")\n ser.write(\":MS#\")\n ser.read(64)\n\n def park(self):\n if (self.parked):\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if (self.parked):\n self.parked = False\n # Unwrap Cable if Azimuth will cross through True North\n # In the above case, Set Azimuth to 180 Degrees, then pick up\n # normal tracking\n # Then sleep 45 seconds to give the positioner time to\n # reposition\n if ((self.azimuth - az) > unwind_threshold):\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\ndef reset():\n obs = ephem.Observer()\n #Set LAT/LON Coordinates to IMSA's location\n obs.date = ephem.now()\n obs.lon = last_lon\n obs.lat = last_lat\n obs.elevation = 0.0\n return obs\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + \" \" + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n # Set Serial Port - USB0\n ser = serial.Serial(port, baud)\n print(\"Port used:\" + ser.name)\n return ser\n# return SerialTester()\n\ndef setup_satellite():\n # Read in TLE for target satellite ICO F2\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997',\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058')\n return icof2\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode(\"ascii\").replace('\\r', '').replace('\\n', '')\n except:\n line = \"\"\n if len(line) > 0 and line[0] == \"$\":\n return line\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print(\"Checksum: \")\n print(mes.checksum())\n print(\"Reformatted Date & Time: \")\n print(mes.get_date())\n print(mes.get_time())\n print(\"Lat, Lon: \")\n print(str(mes.get_lat()) + \", \" + str(mes.get_lon()))\n print(\"Heading, MagVar\")\n print(str(mes.get_magnetic_heading()) + \", \" + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = 
read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\ndef display_stats(orient, position, obs):\n try:\n print(\"\\n\"*65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(''' _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\:::.......:::/\n ':::::::::::'\n `'\"\"\"'`\\n\\n''')\n print(\"Time: {}\\n\".format(ephem.now()))\n\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'\n .format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon))\n print(position.unparsed)\n\n print(\"Sensor\\n===\")\n print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\\\n 'Roll: {roll:7.2f}\\n---'.format(heading = orient.get_heading(),\n pitch = orient.get_pitch(),\n roll = orient.get_roll()))\n print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\\\n ' Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\"\\nMagnetic Declination: {magvar:7.2f}, \"\n \"Adjusted Heading: {true_heading:7.2f}\"\n .format(magvar = magvar,\n true_heading= (orient.get_heading() +\n magvar+720)%360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'\n .format(bearing = position.get_bearing(),\n speed = position.get_speed()))\n except:\n pass\n \n\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n\nhome = reset()\nard = setup_serial(arduino_port, 115200)\ncounter = time.time()\nf = open(\"logs/log_\"+str(float(ephem.now()))+\".csv\", 'w')\nf.write(\"Epoch Time,Speed,Sensor,GPS,Waypoint\\n\")\norient = orientation.orientation(\"$IMU,0,0,0,0,0,0,0,0,0\")\nposition = nmea.nmea(\"$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\")\nmagvar = get_magnetic_var(float(last_lat), float(last_lon))\n\nclass myThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == \"q\":\n break\n val = chr(ord(val) + 1)\n pass\n\nthread1 = myThread()\n\nthread1.start()\n\nwhile True:\n mes = (read_message(ard))\n if mes[:2] == \"$G\":\n try:\n position = nmea.nmea(mes)\n except:\n pass\n elif mes[:2] == \"$I\":\n try:\n orient = orientation.orientation(mes)\n except:\n pass\n # home.date = \"2016-06-28 12:00:00\"\n\n # Operate the antenna if the satellite's elevation is greater than 10\n # degrees\n # If the elevation IS above 10 degrees and the antenna is parked, then\n # unlatch the park_latch variable\n home = update_gps(position, home)\n home.date = ephem.now()\n\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n\n display_stats(orient, position, home)\n print(val)\n if time.time() - counter >= 1.0:\n counter = time.time()\n try:\n f.write(str(ephem.now())+\",\")\n f.write(str(position.get_speed())+\",\")\n f.write(str(orient.get_heading())+\",\")\n f.write(str(position.get_bearing())+\",\")\n f.write(val+\"\\n\")\n except:\n f.write(\"x\\n\")\n if ii == \"q\":\n f.close()\n break\n\n''' icof2_az, icof2_alt = get_sat_position(icof2, home)\n if (icof2_alt >= min_elevation):\n antenna.set_position(icof2_az - heading, icof2_alt)\n\n else:\n antenna.park()'''\n",
"step-ids": [
15,
18,
21,
25,
26
]
}
|
[
15,
18,
21,
25,
26
] |
import strawberry as stb
from app.crud import cruduser
from app.db import get_session
@stb.type
class Query:
@stb.field
async def ReadUser(self, info, username: str):
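        # Open an async DB session and take the first selection from the query's
        # AST (the fields the client asked for); both are passed to
        # cruduser.get_user so it can return only the requested fields.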
ses = await get_session()
fields = info.field_nodes[0].selection_set.selections[0]
return await cruduser.get_user(ses, username, fields)
|
normal
|
{
"blob_id": "0992297ffc19b1bc4dc3d5e8a75307009c837032",
"index": 5134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@stb.type\nclass Query:\n\n @stb.field\n async def ReadUser(self, info, username: str):\n ses = await get_session()\n fields = info.field_nodes[0].selection_set.selections[0]\n return await cruduser.get_user(ses, username, fields)\n",
"step-3": "import strawberry as stb\nfrom app.crud import cruduser\nfrom app.db import get_session\n\n\n@stb.type\nclass Query:\n\n @stb.field\n async def ReadUser(self, info, username: str):\n ses = await get_session()\n fields = info.field_nodes[0].selection_set.selections[0]\n return await cruduser.get_user(ses, username, fields)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
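A minimal sketch of how the Query type from this record would typically be turned into a servable schema; stb.Schema is standard Strawberry API, while the ASGI wrapper and module layout are assumptions not present in the record:

import strawberry as stb
from strawberry.asgi import GraphQL

schema = stb.Schema(query=Query)   # Query is the type defined in the record above
app = GraphQL(schema)              # assumed ASGI deployment; any ASGI server can mount this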
"""
commands/map.py
description:
	Appends a new mapping, built from the command options, to an existing migration file
"""
from json import dumps
from .base_command import BaseCommand
class Map(BaseCommand):
def run(self):
from lib.models import Mapping
from lib.models import Migration
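		# Load the migration named by the MIGRATION_FILE option, wrap the parsed
		# command options in a Mapping, attach it, and write the file back out.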
migration = Migration.load(self.options['MIGRATION_FILE'])
mapping = Mapping(self.options)
migration.mappings.append(mapping)
migration.write()
|
normal
|
{
"blob_id": "07783921da2fb4ae9452324f833b08b3f92ba294",
"index": 546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Map(BaseCommand):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Map(BaseCommand):\n\n def run(self):\n from lib.models import Mapping\n from lib.models import Migration\n migration = Migration.load(self.options['MIGRATION_FILE'])\n mapping = Mapping(self.options)\n migration.mappings.append(mapping)\n migration.write()\n",
"step-4": "<mask token>\nfrom json import dumps\nfrom .base_command import BaseCommand\n\n\nclass Map(BaseCommand):\n\n def run(self):\n from lib.models import Mapping\n from lib.models import Migration\n migration = Migration.load(self.options['MIGRATION_FILE'])\n mapping = Mapping(self.options)\n migration.mappings.append(mapping)\n migration.write()\n",
"step-5": "\"\"\"\n\ncommands/map.py\n\ndescription:\n\tGenerates a blank configuration file in the current directory\n\n\"\"\"\n\nfrom json import dumps\nfrom .base_command import BaseCommand\n\nclass Map(BaseCommand):\n\tdef run(self):\n\t\tfrom lib.models import Mapping\n\t\tfrom lib.models import Migration\n\n\t\tmigration = Migration.load(self.options['MIGRATION_FILE'])\n\n\t\tmapping = Mapping(self.options)\n\n\t\tmigration.mappings.append(mapping)\n\n\t\tmigration.write()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
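A hedged sketch of how this command might be driven; apart from MIGRATION_FILE, the option keys below are hypothetical, and the constructor is assumed to store its argument as self.options, since BaseCommand's argument parsing is not shown in the record:

# Hypothetical options dict -- only MIGRATION_FILE is known from the code above.
options = {'MIGRATION_FILE': 'migration.json', 'source_field': 'users.name', 'target_field': 'customers.full_name'}
Map(options).run()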
from django.contrib.auth import authenticate, login, logout
from django.template import loader
from django.http import (HttpResponse, JsonResponse,
HttpResponseForbidden, HttpResponseBadRequest)
from django.shortcuts import redirect
from django.views.decorators.http import require_POST
import json
from aimodel.AnalyticSession import AnalyticSession
from data.DatasetConfigManager import DatasetConfigManager
def index(request, err_msg=None):
"""
Renders the index page.
"""
template = loader.get_template("aimodel/index.html")
context = {}
context["err_msg"] = err_msg
return HttpResponse(template.render(context, request))
@require_POST
def log_in(request):
"""
Handles login.
"""
# Get the username and password
username = request.POST.get("username")
password = request.POST.get("password")
if not username or not password:
return index(request, "Invalid credentials!")
# Authenticate and log in
user = authenticate(username=username, password=password)
if user:
login(request, user)
return redirect("/main")
else:
return index(request, "Invalid credentials!")
def main(request):
"""
Renders the main page behind login.
"""
if not request.user.is_authenticated:
return redirect("/")
template = loader.get_template("aimodel/main.html")
context = dict()
context["datasets"] = DatasetConfigManager.loaded_datasets_list()
return HttpResponse(template.render(context, request))
@require_POST
def analytics_session(request):
"""
Starts a new analytic session.
"""
if not request.user.is_authenticated:
return redirect("/")
try:
dataset = request.POST["dataset"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
if "analytics" in request.session:
del request.session["analytics"]
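    # The AnalyticSession object created below carries the per-user analytic
    # state (buckets, grid size, display mode) and is persisted in the Django
    # session between requests.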
request.session["analytics"] = AnalyticSession(dataset)
bucket_info = request.session["analytics"].bucket_info()
template = loader.get_template("ui/analytics.html")
context = dict()
context["init_buckets"] = json.dumps(bucket_info["buckets"])
context["init_bucket_ordering"] =\
json.dumps(bucket_info["bucket_ordering"])
return HttpResponse(template.render(context, request))
def log_out(request):
"""
Logs the user out.
"""
if request.user.is_authenticated:
logout(request)
return redirect("/")
def _check_session_valid(request):
"""
A helper function checking whether the user is logged in and the session
data is present.
"""
if not request.user.is_authenticated:
return HttpResponseForbidden(reason="Access denied!")
if "analytics" not in request.session:
err = "Could not fetch analytic session data."
return HttpResponseBadRequest(reason=err)
return None
def bucket_info(request):
"""
Fetches information about current buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
return JsonResponse(request.session["analytics"].bucket_info())
def create_bucket(request):
"""
Creates a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
try:
request.session["analytics"].create_bucket()
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def delete_bucket(request):
"""
Deletes a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].delete_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def rename_bucket(request):
"""
Renames a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
new_bucket_name = request_data["new_bucket_name"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].rename_bucket(bucket_id, new_bucket_name)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def swap_buckets(request):
"""
Swaps the position of two buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket1_id = request_data["bucket1_id"]
bucket2_id = request_data["bucket2_id"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].swap_buckets(bucket1_id, bucket2_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def toggle_bucket(request):
"""
Toggles (activates/deactivates) a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].toggle_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def interaction_round(request):
"""
Performs an interaction round, providing new image suggestions.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
user_feedback = json.loads(request.body)
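    # The raw feedback payload is passed straight to the analytic session,
    # which runs one model round and returns the next batch of suggestions.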
try:
suggs = request.session["analytics"].interaction_round(user_feedback)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(suggs, safe=False)
@require_POST
def bucket_view_data(request):
"""
Obtains bucket view data, i.e., the images in the bucket with bucket
confidences.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
sort_by = request_data["sort_by"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
bucket_view_data =\
request.session["analytics"].bucket_view_data(bucket_id, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
def toggle_mode(request):
"""
Toggles between Tetris/grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request.session["analytics"].toggle_mode()
return JsonResponse({})
@require_POST
def grid_set_size(request):
"""
Resizes the grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
dim = request_data["dim"]
new_size = request_data["new_size"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
new_grid_data = request.session["analytics"].grid_set_size(dim,
new_size)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(new_grid_data, safe=False)
@require_POST
def transfer_images(request):
"""
Transfers (moves/copies) images between buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
images = request_data["images"]
bucket_src = request_data["bucket_src"]
bucket_dst = request_data["bucket_dst"]
mode = request_data["mode"]
sort_by = request_data["sort_by"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].transfer_images(images,
bucket_src, bucket_dst,
mode)
bucket_view_data =\
request.session["analytics"].bucket_view_data(bucket_src, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
@require_POST
def fast_forward(request):
"""
Fast-forwards a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket = request_data["bucket"]
n_ff = request_data["n_ff"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].fast_forward(bucket, n_ff)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def ff_commit(request):
"""
Commits a fast-forward.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket = request_data["bucket"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].ff_commit(bucket)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
def end_session(request):
"""
Ends an analytic session.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
del request.session["analytics"]
response = {
"redirect_url": "/main"
}
return JsonResponse(response)
|
normal
|
{
"blob_id": "41ca762fe6865613ae4ef2f657f86b516353676f",
"index": 9784,
"step-1": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n<mask token>\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return 
HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n<mask token>\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n<mask token>\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-2": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n<mask token>\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n template = loader.get_template('aimodel/main.html')\n context = dict()\n context['datasets'] = DatasetConfigManager.loaded_datasets_list()\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n 
if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n user_feedback = json.loads(request.body)\n try:\n suggs = request.session['analytics'].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n<mask token>\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n 
err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-3": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef log_in(request):\n \"\"\"\n Handles login.\n \"\"\"\n username = request.POST.get('username')\n password = request.POST.get('password')\n if not username or not password:\n return index(request, 'Invalid credentials!')\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return redirect('/main')\n else:\n return index(request, 'Invalid credentials!')\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n template = loader.get_template('aimodel/main.html')\n context = dict()\n context['datasets'] = DatasetConfigManager.loaded_datasets_list()\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 
'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n user_feedback = json.loads(request.body)\n try:\n suggs = request.session['analytics'].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return 
HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n<mask token>\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-4": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef log_in(request):\n \"\"\"\n Handles login.\n \"\"\"\n username = request.POST.get('username')\n password = request.POST.get('password')\n if not username or not password:\n return index(request, 'Invalid credentials!')\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return redirect('/main')\n else:\n return index(request, 'Invalid credentials!')\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n template = loader.get_template('aimodel/main.html')\n context = dict()\n context['datasets'] = DatasetConfigManager.loaded_datasets_list()\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\ndef create_bucket(request):\n \"\"\"\n Creates a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n try:\n request.session['analytics'].create_bucket()\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef 
swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n user_feedback = json.loads(request.body)\n try:\n suggs = request.session['analytics'].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = 
request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n@require_POST\ndef fast_forward(request):\n \"\"\"\n Fast-forwards a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket = request_data['bucket']\n n_ff = request_data['n_ff']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].fast_forward(bucket, n_ff)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-5": "from django.contrib.auth import authenticate, login, logout\nfrom django.template import loader\nfrom django.http import (HttpResponse, JsonResponse,\n HttpResponseForbidden, HttpResponseBadRequest)\nfrom django.shortcuts import redirect\nfrom django.views.decorators.http import require_POST\n\nimport json\n\nfrom aimodel.AnalyticSession import AnalyticSession\nfrom data.DatasetConfigManager import DatasetConfigManager\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template(\"aimodel/index.html\")\n context = {}\n\n context[\"err_msg\"] = err_msg\n\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef log_in(request):\n \"\"\"\n Handles login.\n \"\"\"\n\n # Get the username and password\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n\n if not username or not password:\n return index(request, \"Invalid credentials!\")\n\n # Authenticate and log in\n user = authenticate(username=username, password=password)\n\n if user:\n login(request, user)\n return redirect(\"/main\")\n else:\n return index(request, \"Invalid credentials!\")\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n\n if not request.user.is_authenticated:\n return redirect(\"/\")\n\n template = loader.get_template(\"aimodel/main.html\")\n context = dict()\n context[\"datasets\"] = DatasetConfigManager.loaded_datasets_list()\n\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n\n if not request.user.is_authenticated:\n return redirect(\"/\")\n\n try:\n dataset = request.POST[\"dataset\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n if \"analytics\" in request.session:\n del request.session[\"analytics\"]\n\n request.session[\"analytics\"] = AnalyticSession(dataset)\n\n bucket_info = request.session[\"analytics\"].bucket_info()\n\n template = loader.get_template(\"ui/analytics.html\")\n\n context = dict()\n context[\"init_buckets\"] = json.dumps(bucket_info[\"buckets\"])\n context[\"init_bucket_ordering\"] =\\\n json.dumps(bucket_info[\"bucket_ordering\"])\n\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n\n if request.user.is_authenticated:\n logout(request)\n\n return redirect(\"/\")\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason=\"Access denied!\")\n\n if \"analytics\" not in request.session:\n err = \"Could not fetch analytic session data.\"\n return HttpResponseBadRequest(reason=err)\n\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n return JsonResponse(request.session[\"analytics\"].bucket_info())\n\n\ndef create_bucket(request):\n \"\"\"\n Creates a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n try:\n request.session[\"analytics\"].create_bucket()\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef delete_bucket(request):\n \"\"\"\n Deletes a 
bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].delete_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n new_bucket_name = request_data[\"new_bucket_name\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket1_id = request_data[\"bucket1_id\"]\n bucket2_id = request_data[\"bucket2_id\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n user_feedback = json.loads(request.body)\n\n try:\n suggs = request.session[\"analytics\"].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n sort_by = request_data[\"sort_by\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n bucket_view_data =\\\n request.session[\"analytics\"].bucket_view_data(bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n\n 
session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request.session[\"analytics\"].toggle_mode()\n\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n dim = request_data[\"dim\"]\n new_size = request_data[\"new_size\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n new_grid_data = request.session[\"analytics\"].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n images = request_data[\"images\"]\n bucket_src = request_data[\"bucket_src\"]\n bucket_dst = request_data[\"bucket_dst\"]\n mode = request_data[\"mode\"]\n sort_by = request_data[\"sort_by\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].transfer_images(images,\n bucket_src, bucket_dst,\n mode)\n bucket_view_data =\\\n request.session[\"analytics\"].bucket_view_data(bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(bucket_view_data, safe=False)\n\n\n@require_POST\ndef fast_forward(request):\n \"\"\"\n Fast-forwards a bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket = request_data[\"bucket\"]\n n_ff = request_data[\"n_ff\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].fast_forward(bucket, n_ff)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n print(request_data)\n\n try:\n bucket = request_data[\"bucket\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n del request.session[\"analytics\"]\n\n response = {\n \"redirect_url\": \"/main\"\n }\n\n return JsonResponse(response)\n",
"step-ids": [
13,
16,
17,
19,
22
]
}
|
[
13,
16,
17,
19,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [('game_skeleton', '0001_initial'), ('contenttypes',
'0002_remove_content_type_name'), ('class_room', '0001_initial')]
operations = [migrations.CreateModel(name='UserHero', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('datetime_created', models.
DateTimeField(auto_now=True)), ('datetime_edited', models.
DateTimeField(auto_now_add=True)), ('datetime_finished', models.
DateTimeField(blank=True, null=True)), ('capacity', models.
FloatField()), ('wallet', models.DecimalField(decimal_places=4,
default=0.0, max_digits=10)), ('hero_class', models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to=
'game_skeleton.HeroClass')), ('user', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, related_name='heroes', to=
'class_room.User'))]), migrations.CreateModel(name='EventHistory',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('object_id', models.
PositiveIntegerField()), ('is_draft', models.BooleanField(default=
False, help_text=
'Draft note does not participate in hero capacity calculation.')),
('datetime_created', models.DateTimeField(auto_now=True)), (
'datetime_edited', models.DateTimeField(auto_now_add=True)), (
'author', models.OneToOneField(null=True, on_delete=django.db.
models.deletion.SET_NULL, related_name='actions', to=
'class_room.User')), ('content_type', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.
CASCADE, related_name='events', to='class_room.User'))], options={
'verbose_name_plural': 'User`s history events'})]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [('game_skeleton', '0001_initial'), ('contenttypes',
'0002_remove_content_type_name'), ('class_room', '0001_initial')]
operations = [migrations.CreateModel(name='UserHero', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('datetime_created', models.
DateTimeField(auto_now=True)), ('datetime_edited', models.
DateTimeField(auto_now_add=True)), ('datetime_finished', models.
DateTimeField(blank=True, null=True)), ('capacity', models.
FloatField()), ('wallet', models.DecimalField(decimal_places=4,
default=0.0, max_digits=10)), ('hero_class', models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to=
'game_skeleton.HeroClass')), ('user', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, related_name='heroes', to=
'class_room.User'))]), migrations.CreateModel(name='EventHistory',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('object_id', models.
PositiveIntegerField()), ('is_draft', models.BooleanField(default=
False, help_text=
'Draft note does not participate in hero capacity calculation.')),
('datetime_created', models.DateTimeField(auto_now=True)), (
'datetime_edited', models.DateTimeField(auto_now_add=True)), (
'author', models.OneToOneField(null=True, on_delete=django.db.
models.deletion.SET_NULL, related_name='actions', to=
'class_room.User')), ('content_type', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.
CASCADE, related_name='events', to='class_room.User'))], options={
'verbose_name_plural': 'User`s history events'})]
<|reserved_special_token_1|>
# Generated by Django 3.0 on 2020-05-04 16:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('game_skeleton', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
('class_room', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserHero',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime_created', models.DateTimeField(auto_now=True)),
('datetime_edited', models.DateTimeField(auto_now_add=True)),
('datetime_finished', models.DateTimeField(blank=True, null=True)),
('capacity', models.FloatField()),
('wallet', models.DecimalField(decimal_places=4, default=0.0, max_digits=10)),
('hero_class', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='game_skeleton.HeroClass')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heroes', to='class_room.User')),
],
),
migrations.CreateModel(
name='EventHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('is_draft', models.BooleanField(default=False, help_text='Draft note does not participate in hero capacity calculation.')),
('datetime_created', models.DateTimeField(auto_now=True)),
('datetime_edited', models.DateTimeField(auto_now_add=True)),
('author', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='class_room.User')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='class_room.User')),
],
options={
'verbose_name_plural': 'User`s history events',
},
),
]
|
flexible
|
{
"blob_id": "a718d82713503c4ce3d94225ff0db04991ad4094",
"index": 9744,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('game_skeleton', '0001_initial'), ('contenttypes',\n '0002_remove_content_type_name'), ('class_room', '0001_initial')]\n operations = [migrations.CreateModel(name='UserHero', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('datetime_created', models.\n DateTimeField(auto_now=True)), ('datetime_edited', models.\n DateTimeField(auto_now_add=True)), ('datetime_finished', models.\n DateTimeField(blank=True, null=True)), ('capacity', models.\n FloatField()), ('wallet', models.DecimalField(decimal_places=4,\n default=0.0, max_digits=10)), ('hero_class', models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'game_skeleton.HeroClass')), ('user', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='heroes', to=\n 'class_room.User'))]), migrations.CreateModel(name='EventHistory',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('object_id', models.\n PositiveIntegerField()), ('is_draft', models.BooleanField(default=\n False, help_text=\n 'Draft note does not participate in hero capacity calculation.')),\n ('datetime_created', models.DateTimeField(auto_now=True)), (\n 'datetime_edited', models.DateTimeField(auto_now_add=True)), (\n 'author', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='actions', to=\n 'class_room.User')), ('content_type', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, related_name='events', to='class_room.User'))], options={\n 'verbose_name_plural': 'User`s history events'})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('game_skeleton', '0001_initial'), ('contenttypes',\n '0002_remove_content_type_name'), ('class_room', '0001_initial')]\n operations = [migrations.CreateModel(name='UserHero', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('datetime_created', models.\n DateTimeField(auto_now=True)), ('datetime_edited', models.\n DateTimeField(auto_now_add=True)), ('datetime_finished', models.\n DateTimeField(blank=True, null=True)), ('capacity', models.\n FloatField()), ('wallet', models.DecimalField(decimal_places=4,\n default=0.0, max_digits=10)), ('hero_class', models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'game_skeleton.HeroClass')), ('user', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='heroes', to=\n 'class_room.User'))]), migrations.CreateModel(name='EventHistory',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('object_id', models.\n PositiveIntegerField()), ('is_draft', models.BooleanField(default=\n False, help_text=\n 'Draft note does not participate in hero capacity calculation.')),\n ('datetime_created', models.DateTimeField(auto_now=True)), (\n 'datetime_edited', models.DateTimeField(auto_now_add=True)), (\n 'author', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='actions', to=\n 'class_room.User')), ('content_type', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, related_name='events', to='class_room.User'))], options={\n 'verbose_name_plural': 'User`s history events'})]\n",
"step-5": "# Generated by Django 3.0 on 2020-05-04 16:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('game_skeleton', '0001_initial'),\n ('contenttypes', '0002_remove_content_type_name'),\n ('class_room', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserHero',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('datetime_created', models.DateTimeField(auto_now=True)),\n ('datetime_edited', models.DateTimeField(auto_now_add=True)),\n ('datetime_finished', models.DateTimeField(blank=True, null=True)),\n ('capacity', models.FloatField()),\n ('wallet', models.DecimalField(decimal_places=4, default=0.0, max_digits=10)),\n ('hero_class', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='game_skeleton.HeroClass')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heroes', to='class_room.User')),\n ],\n ),\n migrations.CreateModel(\n name='EventHistory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('object_id', models.PositiveIntegerField()),\n ('is_draft', models.BooleanField(default=False, help_text='Draft note does not participate in hero capacity calculation.')),\n ('datetime_created', models.DateTimeField(auto_now=True)),\n ('datetime_edited', models.DateTimeField(auto_now_add=True)),\n ('author', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='class_room.User')),\n ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='class_room.User')),\n ],\n options={\n 'verbose_name_plural': 'User`s history events',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('number of lines :' + str(len(read)))
while i <= len(read) - 1:
counter = counter + read[i].count('\n') + read[i].count(' ')
total += len(read[i]) - read[i].count('\n') - read[i].count(' ')
i += 1
counter += 1
print('Number of words is :' + str(counter))
print('total number of letters are :' + str(total))
<|reserved_special_token_1|>
text = open('mytext.txt', 'w')
x = text.write("""I like coding
it is a new part
of my life!!!""")
text = open('mytext.txt')
read = text.readlines()
i = 0
counter = 0
total = 0
print('number of lines :' + str(len(read)))
while i <= len(read) - 1:
counter = counter + read[i].count('\n') + read[i].count(' ')
total += len(read[i]) - read[i].count('\n') - read[i].count(' ')
i += 1
counter += 1
print('Number of words is :' + str(counter))
print('total number of letters are :' + str(total))
<|reserved_special_token_1|>
text=open('mytext.txt','w')
x=text.write("I like coding\nit is a new part\nof my life!!!")
text=open('mytext.txt')
read=text.readlines()
i=0
counter=0
total=0
print("number of lines :"+str(len(read)))
while i<=len(read)-1:
counter=counter+read[i].count('\n') + read[i].count(' ')
total+=len(read[i])-read[i].count('\n') - read[i].count(' ')
i+=1
counter+=1
print('Number of words is :'+str(counter))
print('total number of letters are :' +str(total))
|
flexible
|
{
"blob_id": "5ad8db85f4f705173cf5d0649af6039ebe1544b2",
"index": 7488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('number of lines :' + str(len(read)))\nwhile i <= len(read) - 1:\n counter = counter + read[i].count('\\n') + read[i].count(' ')\n total += len(read[i]) - read[i].count('\\n') - read[i].count(' ')\n i += 1\ncounter += 1\nprint('Number of words is :' + str(counter))\nprint('total number of letters are :' + str(total))\n",
"step-3": "text = open('mytext.txt', 'w')\nx = text.write(\"\"\"I like coding\nit is a new part\nof my life!!!\"\"\")\ntext = open('mytext.txt')\nread = text.readlines()\ni = 0\ncounter = 0\ntotal = 0\nprint('number of lines :' + str(len(read)))\nwhile i <= len(read) - 1:\n counter = counter + read[i].count('\\n') + read[i].count(' ')\n total += len(read[i]) - read[i].count('\\n') - read[i].count(' ')\n i += 1\ncounter += 1\nprint('Number of words is :' + str(counter))\nprint('total number of letters are :' + str(total))\n",
"step-4": "text=open('mytext.txt','w')\nx=text.write(\"I like coding\\nit is a new part\\nof my life!!!\")\ntext=open('mytext.txt')\nread=text.readlines()\ni=0\ncounter=0\ntotal=0\nprint(\"number of lines :\"+str(len(read)))\n\nwhile i<=len(read)-1:\n counter=counter+read[i].count('\\n') + read[i].count(' ')\n total+=len(read[i])-read[i].count('\\n') - read[i].count(' ')\n i+=1\ncounter+=1\nprint('Number of words is :'+str(counter))\nprint('total number of letters are :' +str(total))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestIsBalanced(unittest.TestCase):
def test_is_balanced(self):
self.assertEquals(descending_order(0), 0)
self.assertEquals(descending_order(15), 51)
self.assertEquals(descending_order(123456789), 987654321)
self.assertEquals(descending_order(1201), 2110)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def descending_order(num):
return int(''.join(sorted(str(num), reverse=True)))
<|reserved_special_token_0|>
class TestIsBalanced(unittest.TestCase):
def test_is_balanced(self):
self.assertEquals(descending_order(0), 0)
self.assertEquals(descending_order(15), 51)
self.assertEquals(descending_order(123456789), 987654321)
self.assertEquals(descending_order(1201), 2110)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def descending_order(num):
return int(''.join(sorted(str(num), reverse=True)))
<|reserved_special_token_0|>
class TestIsBalanced(unittest.TestCase):
def test_is_balanced(self):
self.assertEquals(descending_order(0), 0)
self.assertEquals(descending_order(15), 51)
self.assertEquals(descending_order(123456789), 987654321)
self.assertEquals(descending_order(1201), 2110)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
def descending_order(num):
return int(''.join(sorted(str(num), reverse=True)))
import unittest
class TestIsBalanced(unittest.TestCase):
def test_is_balanced(self):
self.assertEquals(descending_order(0), 0)
self.assertEquals(descending_order(15), 51)
self.assertEquals(descending_order(123456789), 987654321)
self.assertEquals(descending_order(1201), 2110)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "fc5d0dd16b87ab073bf4b054bd2641bdec88e019",
"index": 6594,
"step-1": "<mask token>\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\n<mask token>\n",
"step-2": "def descending_order(num):\n return int(''.join(sorted(str(num), reverse=True)))\n\n\n<mask token>\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\n<mask token>\n",
"step-3": "def descending_order(num):\n return int(''.join(sorted(str(num), reverse=True)))\n\n\n<mask token>\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "def descending_order(num):\n return int(''.join(sorted(str(num), reverse=True)))\n\n\nimport unittest\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
#!/usr/bin/env python3
import operator
from functools import reduce
import music21
def get_top_line(piece):
top_part = piece.parts[0]
if len(top_part.voices) > 0:
top_part = top_part.voices[0]
# replace all chords with top note of chord
for item in top_part.notes:
if isinstance(item, music21.chord.Chord):
top_part.notes.replace(item, item[0])
return top_part
def get_notes(piece):
part = piece.parts[0]
measures = filter(lambda x: isinstance(x, music21.stream.Measure), part.elements)
# add all the notes from all the measures
notes = reduce(operator.add, map(lambda x: x.notes.elements, measures))
return list(notes)
|
normal
|
{
"blob_id": "92ee66565eb1d0e3cd8fa1ec16747f15e0d92be8",
"index": 2885,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_notes(piece):\n part = piece.parts[0]\n measures = filter(lambda x: isinstance(x, music21.stream.Measure), part\n .elements)\n notes = reduce(operator.add, map(lambda x: x.notes.elements, measures))\n return list(notes)\n",
"step-3": "<mask token>\n\n\ndef get_top_line(piece):\n top_part = piece.parts[0]\n if len(top_part.voices) > 0:\n top_part = top_part.voices[0]\n for item in top_part.notes:\n if isinstance(item, music21.chord.Chord):\n top_part.notes.replace(item, item[0])\n return top_part\n\n\ndef get_notes(piece):\n part = piece.parts[0]\n measures = filter(lambda x: isinstance(x, music21.stream.Measure), part\n .elements)\n notes = reduce(operator.add, map(lambda x: x.notes.elements, measures))\n return list(notes)\n",
"step-4": "import operator\nfrom functools import reduce\nimport music21\n\n\ndef get_top_line(piece):\n top_part = piece.parts[0]\n if len(top_part.voices) > 0:\n top_part = top_part.voices[0]\n for item in top_part.notes:\n if isinstance(item, music21.chord.Chord):\n top_part.notes.replace(item, item[0])\n return top_part\n\n\ndef get_notes(piece):\n part = piece.parts[0]\n measures = filter(lambda x: isinstance(x, music21.stream.Measure), part\n .elements)\n notes = reduce(operator.add, map(lambda x: x.notes.elements, measures))\n return list(notes)\n",
"step-5": "#!/usr/bin/env python3\n\nimport operator\nfrom functools import reduce\n\nimport music21\n\n\ndef get_top_line(piece):\n top_part = piece.parts[0]\n if len(top_part.voices) > 0:\n top_part = top_part.voices[0]\n\n # replace all chords with top note of chord\n for item in top_part.notes:\n if isinstance(item, music21.chord.Chord):\n top_part.notes.replace(item, item[0])\n return top_part\n\n\ndef get_notes(piece):\n part = piece.parts[0]\n measures = filter(lambda x: isinstance(x, music21.stream.Measure), part.elements)\n\n # add all the notes from all the measures\n notes = reduce(operator.add, map(lambda x: x.notes.elements, measures))\n\n return list(notes)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import request, Flask
import ldap3
app = Flask(__name__)
@app.route("/normal")
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = "dc={}".format(unsafe_dc)
search_filter = "(user={})".format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route("/direct")
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = "dc={}".format(unsafe_dc)
search_filter = "(user={})".format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(
dn, search_filter)
# if __name__ == "__main__":
# app.run(debug=True)
|
normal
|
{
"blob_id": "b51591de921f6e153c1dd478cec7fad42ff4251a",
"index": 749,
"step-1": "<mask token>\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-2": "<mask token>\n\n\n@app.route('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-4": "from flask import request, Flask\nimport ldap3\napp = Flask(__name__)\n\n\n@app.route('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-5": "from flask import request, Flask\nimport ldap3\n\napp = Flask(__name__)\n\n\n@app.route(\"/normal\")\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n\n dn = \"dc={}\".format(unsafe_dc)\n search_filter = \"(user={})\".format(unsafe_filter)\n\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route(\"/direct\")\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n\n dn = \"dc={}\".format(unsafe_dc)\n search_filter = \"(user={})\".format(unsafe_filter)\n\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(\n dn, search_filter)\n\n# if __name__ == \"__main__\":\n# app.run(debug=True)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
"""
CST 383, measles simulation homework
# Here's a question. Suppose 1% of people have measles, that the
# test for measles if 98% accurate if you do have measles, and 98%
# accurate if you don't have measles. Then what is the probability
# that you have measles, given that you have tested positive for them?
#
# Try guessing an answer before you start on this assignment.
#
# In this homework we will use simulation to estimate the answer,
# and we'll also compute the answer using Bayes' Law. There
# are three parts below:
# 1. Warm up by simulating some coin flips.
# 2. Use simulation to answer the question above.
# 3. Use Bayes' Law to answer the question without simulation.
"""
import numpy as np
import matplotlib.pyplot as plt
# Instructions:
# Problems start with #@ and then give a number. Enter your
# Python code after each problem. Do not use any variables
# in your answer except for the ones that the problem says
# you can assume are defined.
#
# Part 1: warmup
#
#@ 1
# Simulate flipping a coin 200 times that has a 90% chance of
# landing heads. Store your result in a NumPy array x of length
# 200 that contains only 0 or 1, where 1 represents heads.
# Use np.random.choice().
# (assignment to x)
x = np.random.choice(2, 200, p=[0.1, 0.9])
#@ 2
# Repeat the problem above, but this time use np.random.sample(),
# which gives values between 0 and 1. Obviously you will need to do
# further processing to turn the output of sample() into your
# array x. This will take a little thought.
# (assignment to x)
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
return sum(cs < x)
vectroized_calculated_weights = np.vectorize(calculated_weights)
x = vectroized_calculated_weights(x)
#@ 3
# compute the fraction of values in array x that are 1.
# (expression)
len(x[x == 1]) / len(x)
#@ 4
# Flip the weighted coin of problem 1 200 times, compute the fraction
# of values that are 1, and repeat this entire process 100 times to
# get an array of length 100. Assign this array to variable y1.
# (assignment to y1)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
y1 = np.array([len(t200()[t200()==1])/len(t200()) for i in range(100)])
#@ 5
# plot a histogram of y1 using matplotlib
# (produce a plot)
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel("frequency")
#@ 6
# compute a NumPy array y2 that is just like y1, except that in creating y2
# we do 1000 coin flips in each experiment, not 200.
# (assignment to y2)
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
y2 = np.array([len(t1000()[t1000()==1])/len(t1000()) for i in range(100)])
#@ 7
# plot histograms for y1 and y2, with the histogram for y1 above
# the plot for y2. Our lecture notes show how to do this; see
# the 'multiple subplots' slide. Use matplotlib. In both histograms,
# let the x axis values range from 0.85 to 0.95. Please study
# the two histograms and think about why they are different.
# Assume y1 and y2 are defined.
# (produce a plot)
fig, ax = plt.subplots(2)
fig.suptitle("Histograms for Y1 and Y2")
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
#
# Part 2 - simulate the answer to the question
#
#@ 8
# Simulate the overall occurrence of measles among 10,000 people,
# based on the assumption that each person has a 0.01% chance of
# having measles.
# Compute a NumPy array x of length 10,000, where each value is
# either 0 or 1. Each of the 10,000 values should be found by
# "flipping a 0/1 coin" that is weighted 99% to 0. Approximately
# 99% of the values in x should be 0, and the others should be one.
# (assignment to x)
x = np.random.choice(2, 10000, p=[0.99, 0.01])
#@ 9
# Simulate the measles test results on the people without measles,
# based on the assumption that the measles test gives the right
# answer about 95% of the time on people without measles.
# Create an array y0, which is as long as the number of 0's in
# array x, by flipping a 0/1 coin that is weighted 95% to 0.
# Assume x is defined.
# (assignment to y0)
y0 = np.random.choice(2, len(x[x==0]), p=[0.95, 0.05])
#@ 10
# Simulate the measles test results on the people with measles,
# based on the assumption that the measles test gives the right
# answer about 98% of the time on people with measles.
# Create an array y1, which is as long as the number of 1's in
# array x, by flipping a 0/1 coin that is weighted 98% to 1.
# Assume x is defined.
# (assignment to y1)
y1 = np.random.choice(2, len(x[x==1]), p=[0.02, 0.98])
#@ 11
# Collect the measles-free people among those who tested positive.
# Compute a vector pos_no_meas that is all 0's, and is as long as the
# number of 1's in y0.
# Assume y0 is defined.
# (assignment to pos_no_meas)
pos_no_meas = np.zeros(len(y0[y0==1]))
#@ 12
# Collect the measles-infected people among those who tested positive.
# Compute a vector pos_with_meas that is all 1's, and is as long as
# the number of 1's in y1.
# Assume y1 is defined.
# (assignment to pos_with_meas)
pos_with_meas = np.ones(len(y1[y1==1]))
#@ 13
# Collect information about all people who tested positive.
# Concatenate arrays pos_no_meas and pos_with_meas, and assign
# the result to array 'tested_pos'. A 0 in in this array means
# no measles; a 1 means measles.
# Assume pos_no_meas and pos_with_meas are defined.
# (assignment to tested_pos)
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
#@ 14
# Estimate the probability of having measles if you've tested
# positive for measles. Compute the fraction of values in
# tested_positive that are 1, and assign the result to
# variable 'p'.
# Assume tested_pos is defined.
# (assignment to p)
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
#@ 15
# Package up your code into a function 'prob_cond_given_pos'. This
# function will return the probability of having a condition, based
# on certain probabilities.
# The function should have the following parameters:
# prob_cond - probability of a condition (above you used 0.01)
# prob_pos_given_cond - probability of testing positive given condition (you used 0.98)
# prob_neg_given_no_cond - probability of testing negative given no condition (you used 0.95)
# The function must return the probability of having the condition.
#
# Your function should return a slightly different value every time.
# When you run prob_cond_given_pos(0.01, 0.98, 0.95), you should get an answer
# similar to the value of p you just computed.
#
# Here is the output from tests I ran with my code:
# test 1:
# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(1000)]).mean()
# output: 0.8180582615720287
# test 2:
# np.array([prob_cond_given_pos(0.3, 0.8, 0.7) for i in range(1000)]).mean()
# output: 0.5334712339397902
# test 3:
# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(100)]).std()
# output: 0.00550051982001144
#
## I provided the function header. You should fill out the function body,
# including the return statement.
# (define a function)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):
x = np.random.choice(2, 10000, p=[1-prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x==0]), p=[prob_neg_given_no_cond, 1-prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x==1]), p=[1-prob_pos_given_cond, prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0==1]))
pos_with_meas = np.ones(len(y1[y1==1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
#
# Part 3 - compute the answer using Bayes' Law
#
#@ 16
# Write a function 'prob_cond_given_pos_bayes'. This function
# will take the same parameters as prob_cond_given_pos, but will
# use Bayes' Law to compute the result.
#
# Here is some output from my code:
# test1:
# prob_cond_given_pos_bayes(0.5, 0.9, 0.8)
# output: 0.1818...
# test 2:
# prob_cond_given_pos_bayes(0.3, 0.8, 0.7)
# output: 0.5333...
#
# I provided the function header. You should fill out the function body,
# including the return statement.
# (define a function)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):
return (prob_pos_given_cond*prob_cond) / ((prob_pos_given_cond*prob_cond)+(1-prob_neg_given_no_cond)*(1-prob_cond))
#@ 17
# How does the probability of having a condition given you
# tested positive for it change based on how rare the
# condition is?
# Produce a histogram showing the probability of having measles
# given you tested positive for measles. Compute
# prob_cond_given_pos_bayes(x, 0.98, 0.95) for x ranging
# from 0.001 to 0.10 (x is the probability of having the
# condition). Use at least 100 values of x.
# Plot the results as a scatter plot, with x on the x axis
# and probability on the y axis. Label the x and y axes
# appropriately. Use matplotlib.
# Assume function prob_cond_given_pos_bayes() is defined.
# (produce a plot)
#x = np.arange(0.001, 0.1, ((0.1-0.001)/100))
x = np.linspace(0.001, 0.1, 100)
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel("Probability of condition (%)")
plt.ylabel("Probability of condition if tested positive (%)")
|
normal
|
{
"blob_id": "076d9f0c14a8070993039bbda2ffe4d52c8d2273",
"index": 1512,
"step-1": "<mask token>\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\n<mask token>\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\n<mask token>\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculated_weights(x):\n return sum(cs < x)\n\n\n<mask token>\nlen(x[x == 1]) / len(x)\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\n<mask token>\nplt.hist(y1)\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\nplt.ylabel('frequency')\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\n<mask token>\nfig.suptitle('Histograms for Y1 and Y2')\nax[0].hist(y1)\nax[1].hist(y2)\nax[0].set_xlim([0.85, 0.95])\nax[1].set_xlim([0.85, 0.95])\n<mask token>\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\n<mask token>\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\nplt.xlabel('Probability of condition (%)')\nplt.ylabel('Probability of condition if tested positive (%)')\n",
"step-3": "<mask token>\nx = np.random.choice(2, 200, p=[0.1, 0.9])\nx = np.random.sample(size=200)\nweights = [0.1, 0.9]\ncs = np.cumsum(weights)\n\n\ndef calculated_weights(x):\n return sum(cs < x)\n\n\nvectroized_calculated_weights = np.vectorize(calculated_weights)\nx = vectroized_calculated_weights(x)\nlen(x[x == 1]) / len(x)\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\ny1 = np.array([(len(t200()[t200() == 1]) / len(t200())) for i in range(100)])\nplt.hist(y1)\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\nplt.ylabel('frequency')\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\ny2 = np.array([(len(t1000()[t1000() == 1]) / len(t1000())) for i in range(100)]\n )\nfig, ax = plt.subplots(2)\nfig.suptitle('Histograms for Y1 and Y2')\nax[0].hist(y1)\nax[1].hist(y2)\nax[0].set_xlim([0.85, 0.95])\nax[1].set_xlim([0.85, 0.95])\nx = np.random.choice(2, 10000, p=[0.99, 0.01])\ny0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])\ny1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])\npos_no_meas = np.zeros(len(y0[y0 == 1]))\npos_with_meas = np.ones(len(y1[y1 == 1]))\ntested_pos = np.concatenate((pos_no_meas, pos_with_meas))\np = len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\nx = np.linspace(0.001, 0.1, 100)\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\nplt.xlabel('Probability of condition (%)')\nplt.ylabel('Probability of condition if tested positive (%)')\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nx = np.random.choice(2, 200, p=[0.1, 0.9])\nx = np.random.sample(size=200)\nweights = [0.1, 0.9]\ncs = np.cumsum(weights)\n\n\ndef calculated_weights(x):\n return sum(cs < x)\n\n\nvectroized_calculated_weights = np.vectorize(calculated_weights)\nx = vectroized_calculated_weights(x)\nlen(x[x == 1]) / len(x)\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\ny1 = np.array([(len(t200()[t200() == 1]) / len(t200())) for i in range(100)])\nplt.hist(y1)\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\nplt.ylabel('frequency')\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\ny2 = np.array([(len(t1000()[t1000() == 1]) / len(t1000())) for i in range(100)]\n )\nfig, ax = plt.subplots(2)\nfig.suptitle('Histograms for Y1 and Y2')\nax[0].hist(y1)\nax[1].hist(y2)\nax[0].set_xlim([0.85, 0.95])\nax[1].set_xlim([0.85, 0.95])\nx = np.random.choice(2, 10000, p=[0.99, 0.01])\ny0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])\ny1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])\npos_no_meas = np.zeros(len(y0[y0 == 1]))\npos_with_meas = np.ones(len(y1[y1 == 1]))\ntested_pos = np.concatenate((pos_no_meas, pos_with_meas))\np = len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\nx = np.linspace(0.001, 0.1, 100)\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\nplt.xlabel('Probability of condition (%)')\nplt.ylabel('Probability of condition if tested positive (%)')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nCST 383, measles simulation homework\r\n\r\n# Here's a question. Suppose 1% of people have measles, that the\r\n# test for measles if 98% accurate if you do have measles, and 98%\r\n# accurate if you don't have measles. Then what is the probability\r\n# that you have measles, given that you have tested positive for them?\r\n#\r\n# Try guessing an answer before you start on this assignment.\r\n#\r\n# In this homework we will use simulation to estimate the answer,\r\n# and we'll also compute the answer using Bayes' Law. There\r\n# are three parts below:\r\n# 1. Warm up by simulating some coin flips.\r\n# 2. Use simulation to answer the question above.\r\n# 3. Use Bayes' Law to answer the question without simulation.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Instructions: \r\n# Problems start with #@ and then give a number. Enter your\r\n# Python code after each problem. Do not use any variables\r\n# in your answer except for the ones that the problem says\r\n# you can assume are defined.\r\n\r\n\r\n#\r\n# Part 1: warmup\r\n#\r\n\r\n#@ 1\r\n# Simulate flipping a coin 200 times that has a 90% chance of\r\n# landing heads. Store your result in a NumPy array x of length\r\n# 200 that contains only 0 or 1, where 1 represents heads.\r\n# Use np.random.choice(). \r\n# (assignment to x)\r\nx = np.random.choice(2, 200, p=[0.1, 0.9])\r\n\r\n#@ 2\r\n# Repeat the problem above, but this time use np.random.sample(),\r\n# which gives values between 0 and 1. Obviously you will need to do\r\n# further processing to turn the output of sample() into your\r\n# array x. This will take a little thought.\r\n# (assignment to x)\r\nx = np.random.sample(size=200)\r\n\r\nweights = [0.1, 0.9]\r\ncs = np.cumsum(weights)\r\n\r\ndef calculated_weights(x):\r\n return sum(cs < x)\r\n\r\nvectroized_calculated_weights = np.vectorize(calculated_weights)\r\nx = vectroized_calculated_weights(x)\r\n\r\n#@ 3\r\n# compute the fraction of values in array x that are 1.\r\n# (expression)\r\nlen(x[x == 1]) / len(x)\r\n\r\n\r\n#@ 4\r\n# Flip the weighted coin of problem 1 200 times, compute the fraction\r\n# of values that are 1, and repeat this entire process 100 times to\r\n# get an array of length 100. Assign this array to variable y1.\r\n# (assignment to y1)\r\ndef t200():\r\n return np.random.choice(2, 200, p=[0.1, 0.9])\r\n\r\ny1 = np.array([len(t200()[t200()==1])/len(t200()) for i in range(100)])\r\n\r\n#@ 5\r\n# plot a histogram of y1 using matplotlib\r\n# (produce a plot)\r\nplt.hist(y1)\r\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\r\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\r\nplt.ylabel(\"frequency\")\r\n\r\n#@ 6\r\n# compute a NumPy array y2 that is just like y1, except that in creating y2\r\n# we do 1000 coin flips in each experiment, not 200.\r\n# (assignment to y2)\r\ndef t1000():\r\n return np.random.choice(2, 1000, p=[0.1, 0.9])\r\n\r\ny2 = np.array([len(t1000()[t1000()==1])/len(t1000()) for i in range(100)])\r\n\r\n#@ 7\r\n# plot histograms for y1 and y2, with the histogram for y1 above \r\n# the plot for y2. Our lecture notes show how to do this; see\r\n# the 'multiple subplots' slide. Use matplotlib. In both histograms, \r\n# let the x axis values range from 0.85 to 0.95. 
Please study\r\n# the two histograms and think about why they are different.\r\n# Assume y1 and y2 are defined.\r\n# (produce a plot)\r\n\r\nfig, ax = plt.subplots(2)\r\nfig.suptitle(\"Histograms for Y1 and Y2\")\r\nax[0].hist(y1)\r\nax[1].hist(y2)\r\nax[0].set_xlim([0.85, 0.95])\r\nax[1].set_xlim([0.85, 0.95])\r\n\r\n#\r\n# Part 2 - simulate the answer to the question\r\n#\r\n\r\n#@ 8\r\n# Simulate the overall occurrence of measles among 10,000 people,\r\n# based on the assumption that each person has a 0.01% chance of\r\n# having measles. \r\n# Compute a NumPy array x of length 10,000, where each value is \r\n# either 0 or 1. Each of the 10,000 values should be found by \r\n# \"flipping a 0/1 coin\" that is weighted 99% to 0. Approximately \r\n# 99% of the values in x should be 0, and the others should be one.\r\n# (assignment to x)\r\nx = np.random.choice(2, 10000, p=[0.99, 0.01])\r\n\r\n#@ 9\r\n# Simulate the measles test results on the people without measles,\r\n# based on the assumption that the measles test gives the right\r\n# answer about 95% of the time on people without measles.\r\n# Create an array y0, which is as long as the number of 0's in\r\n# array x, by flipping a 0/1 coin that is weighted 95% to 0.\r\n# Assume x is defined.\r\n# (assignment to y0)\r\ny0 = np.random.choice(2, len(x[x==0]), p=[0.95, 0.05])\r\n\r\n\r\n#@ 10\r\n# Simulate the measles test results on the people with measles,\r\n# based on the assumption that the measles test gives the right\r\n# answer about 98% of the time on people with measles.\r\n# Create an array y1, which is as long as the number of 1's in\r\n# array x, by flipping a 0/1 coin that is weighted 98% to 1.\r\n# Assume x is defined.\r\n# (assignment to y1)\r\ny1 = np.random.choice(2, len(x[x==1]), p=[0.02, 0.98])\r\n\r\n\r\n#@ 11\r\n# Collect the measles-free people among those who tested positive.\r\n# Compute a vector pos_no_meas that is all 0's, and is as long as the\r\n# number of 1's in y0.\r\n# Assume y0 is defined.\r\n# (assignment to pos_no_meas)\r\npos_no_meas = np.zeros(len(y0[y0==1]))\r\n\r\n#@ 12\r\n# Collect the measles-infected people among those who tested positive.\r\n# Compute a vector pos_with_meas that is all 1's, and is as long as\r\n# the number of 1's in y1.\r\n# Assume y1 is defined.\r\n# (assignment to pos_with_meas)\r\npos_with_meas = np.ones(len(y1[y1==1]))\r\n\r\n#@ 13\r\n# Collect information about all people who tested positive.\r\n# Concatenate arrays pos_no_meas and pos_with_meas, and assign\r\n# the result to array 'tested_pos'. A 0 in in this array means \r\n# no measles; a 1 means measles.\r\n# Assume pos_no_meas and pos_with_meas are defined.\r\n# (assignment to tested_pos)\r\ntested_pos = np.concatenate((pos_no_meas, pos_with_meas))\r\n\r\n#@ 14\r\n# Estimate the probability of having measles if you've tested\r\n# positive for measles. Compute the fraction of values in \r\n# tested_positive that are 1, and assign the result to \r\n# variable 'p'.\r\n# Assume tested_pos is defined.\r\n# (assignment to p) \r\np = len(tested_pos[tested_pos == 1]) / len(tested_pos)\r\n\r\n\r\n#@ 15\r\n# Package up your code into a function 'prob_cond_given_pos'. 
This\r\n# function will return the probability of having a condition, based\r\n# on certain probabilities.\r\n# The function should have the following parameters:\r\n# prob_cond - probability of a condition (above you used 0.01)\r\n# prob_pos_given_cond - probability of testing positive given condition (you used 0.98)\r\n# prob_neg_given_no_cond - probability of testing negative given no condition (you used 0.95)\r\n# The function must return the probability of having the condition.\r\n#\r\n# Your function should return a slightly different value every time.\r\n# When you run prob_cond_given_pos(0.01, 0.98, 0.95), you should get an answer\r\n# similar to the value of p you just computed.\r\n#\r\n# Here is the output from tests I ran with my code:\r\n# test 1:\r\n# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(1000)]).mean()\r\n# output: 0.8180582615720287\r\n# test 2:\r\n# np.array([prob_cond_given_pos(0.3, 0.8, 0.7) for i in range(1000)]).mean()\r\n# output: 0.5334712339397902\r\n# test 3:\r\n# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(100)]).std()\r\n# output: 0.00550051982001144\r\n#\r\n## I provided the function header. You should fill out the function body,\r\n# including the return statement.\r\n# (define a function)\r\n\r\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):\r\n x = np.random.choice(2, 10000, p=[1-prob_cond, prob_cond])\r\n y0 = np.random.choice(2, len(x[x==0]), p=[prob_neg_given_no_cond, 1-prob_neg_given_no_cond])\r\n y1 = np.random.choice(2, len(x[x==1]), p=[1-prob_pos_given_cond, prob_pos_given_cond])\r\n pos_no_meas = np.zeros(len(y0[y0==1]))\r\n pos_with_meas = np.ones(len(y1[y1==1]))\r\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\r\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\r\n\r\n#\r\n# Part 3 - compute the answer using Bayes' Law\r\n#\r\n\r\n#@ 16\r\n# Write a function 'prob_cond_given_pos_bayes'. This function\r\n# will take the same parameters as prob_cond_given_pos, but will\r\n# use Bayes' Law to compute the result.\r\n#\r\n# Here is some output from my code:\r\n# test1:\r\n# prob_cond_given_pos_bayes(0.5, 0.9, 0.8)\r\n# output: 0.1818...\r\n# test 2:\r\n# prob_cond_given_pos_bayes(0.3, 0.8, 0.7) \r\n# output: 0.5333...\r\n#\r\n# I provided the function header. You should fill out the function body,\r\n# including the return statement.\r\n# (define a function)\r\n\r\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):\r\n return (prob_pos_given_cond*prob_cond) / ((prob_pos_given_cond*prob_cond)+(1-prob_neg_given_no_cond)*(1-prob_cond))\r\n\r\n#@ 17\r\n# How does the probability of having a condition given you\r\n# tested positive for it change based on how rare the \r\n# condition is? \r\n# Produce a histogram showing the probability of having measles\r\n# given you tested positive for measles. Compute \r\n# prob_cond_given_pos_bayes(x, 0.98, 0.95) for x ranging\r\n# from 0.001 to 0.10 (x is the probability of having the \r\n# condition). Use at least 100 values of x.\r\n# Plot the results as a scatter plot, with x on the x axis\r\n# and probability on the y axis. Label the x and y axes\r\n# appropriately. 
Use matplotlib.\r\n# Assume function prob_cond_given_pos_bayes() is defined.\r\n# (produce a plot)\r\n#x = np.arange(0.001, 0.1, ((0.1-0.001)/100))\r\nx = np.linspace(0.001, 0.1, 100)\r\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\r\nplt.xlabel(\"Probability of condition (%)\")\r\nplt.ylabel(\"Probability of condition if tested positive (%)\")",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
class Node:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, val):
self.childleft = None
self.childright = None
self.nodedata = val
<|reserved_special_token_0|>
def trying():
if message == 'root':
def InOrd(root):
if root:
InOrd(root.childleft)
print(root.nodedata)
InOrd(root.childright)
InOrd(root)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, val):
self.childleft = None
self.childright = None
self.nodedata = val
<|reserved_special_token_0|>
print()
def trying():
if message == 'root':
def InOrd(root):
if root:
InOrd(root.childleft)
print(root.nodedata)
InOrd(root.childright)
InOrd(root)
trying()
<|reserved_special_token_1|>
class Node:
def __init__(self, val):
self.childleft = None
self.childright = None
self.nodedata = val
root = Node('Kaif')
root.childleft = Node('name')
root.childright = Node('!')
root.childleft.childleft = Node('My')
root.childleft.childright = Node('is')
message = input('Solve In order traversal, Type root: ')
print()
def trying():
if message == 'root':
def InOrd(root):
if root:
InOrd(root.childleft)
print(root.nodedata)
InOrd(root.childright)
InOrd(root)
trying()
<|reserved_special_token_1|>
class Node:
def __init__ (self, val):
self.childleft = None
self.childright = None
self.nodedata = val
root = Node("Kaif")
root.childleft = Node("name")
root.childright = Node("!")
root.childleft.childleft = Node("My")
root.childleft.childright = Node("is")
message = input("Solve In order traversal, Type root: ")
print()
def trying():
if message == "root":
def InOrd(root):
if root:
InOrd(root.childleft)
print(root.nodedata)
InOrd(root.childright)
InOrd(root)
trying()
|
flexible
|
{
"blob_id": "73e4346007acae769b94a55ef53a48a9d3325002",
"index": 7262,
"step-1": "class Node:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n\n<mask token>\n\n\ndef trying():\n if message == 'root':\n\n def InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n InOrd(root)\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n\n<mask token>\nprint()\n\n\ndef trying():\n if message == 'root':\n\n def InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n InOrd(root)\n\n\ntrying()\n",
"step-4": "class Node:\n\n def __init__(self, val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n\nroot = Node('Kaif')\nroot.childleft = Node('name')\nroot.childright = Node('!')\nroot.childleft.childleft = Node('My')\nroot.childleft.childright = Node('is')\nmessage = input('Solve In order traversal, Type root: ')\nprint()\n\n\ndef trying():\n if message == 'root':\n\n def InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n InOrd(root)\n\n\ntrying()\n",
"step-5": "class Node:\r\n def __init__ (self, val):\r\n self.childleft = None\r\n self.childright = None\r\n self.nodedata = val\r\n\r\nroot = Node(\"Kaif\")\r\nroot.childleft = Node(\"name\")\r\nroot.childright = Node(\"!\")\r\nroot.childleft.childleft = Node(\"My\")\r\nroot.childleft.childright = Node(\"is\")\r\n\r\n\r\n\r\nmessage = input(\"Solve In order traversal, Type root: \")\r\nprint()\r\ndef trying():\r\n if message == \"root\":\r\n def InOrd(root):\r\n if root:\r\n InOrd(root.childleft)\r\n print(root.nodedata)\r\n InOrd(root.childright)\r\n InOrd(root)\r\ntrying()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class TestMicrophone:
<|reserved_special_token_0|>
def test_config(self):
required_config = ['card_number', 'device_index', 'sample_rate',
'phrase_time_limit', 'energy_threshold']
for config_key in required_config:
assert config_key in self.config
def test_listen(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMicrophone:
<|reserved_special_token_0|>
def test_config(self):
required_config = ['card_number', 'device_index', 'sample_rate',
'phrase_time_limit', 'energy_threshold']
for config_key in required_config:
assert config_key in self.config
def test_listen(self):
pass
def test_recognise_command(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMicrophone:
def setup_method(self, method):
self.config = config.SENSOR['microphone']
def test_config(self):
required_config = ['card_number', 'device_index', 'sample_rate',
'phrase_time_limit', 'energy_threshold']
for config_key in required_config:
assert config_key in self.config
def test_listen(self):
pass
def test_recognise_command(self):
pass
<|reserved_special_token_1|>
from arnold import config
class TestMicrophone:
def setup_method(self, method):
self.config = config.SENSOR['microphone']
def test_config(self):
required_config = ['card_number', 'device_index', 'sample_rate',
'phrase_time_limit', 'energy_threshold']
for config_key in required_config:
assert config_key in self.config
def test_listen(self):
pass
def test_recognise_command(self):
pass
<|reserved_special_token_1|>
from arnold import config
class TestMicrophone:
def setup_method(self, method):
self.config = config.SENSOR['microphone']
def test_config(self):
required_config = [
'card_number', 'device_index', 'sample_rate', 'phrase_time_limit',
'energy_threshold'
]
for config_key in required_config:
assert config_key in self.config
def test_listen(self):
# TODO: Figure out how to mock this
pass
def test_recognise_command(self):
# TODO: Figure out how to mock this
pass
|
flexible
|
{
"blob_id": "164167590051fac3f3fd80c5ed82621ba55c4cc4",
"index": 9597,
"step-1": "<mask token>\n\n\nclass TestMicrophone:\n <mask token>\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMicrophone:\n <mask token>\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n\n def test_recognise_command(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass TestMicrophone:\n\n def setup_method(self, method):\n self.config = config.SENSOR['microphone']\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n\n def test_recognise_command(self):\n pass\n",
"step-4": "from arnold import config\n\n\nclass TestMicrophone:\n\n def setup_method(self, method):\n self.config = config.SENSOR['microphone']\n\n def test_config(self):\n required_config = ['card_number', 'device_index', 'sample_rate',\n 'phrase_time_limit', 'energy_threshold']\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n pass\n\n def test_recognise_command(self):\n pass\n",
"step-5": "from arnold import config\n\n\nclass TestMicrophone:\n\n def setup_method(self, method):\n self.config = config.SENSOR['microphone']\n\n def test_config(self):\n required_config = [\n 'card_number', 'device_index', 'sample_rate', 'phrase_time_limit',\n 'energy_threshold'\n ]\n for config_key in required_config:\n assert config_key in self.config\n\n def test_listen(self):\n # TODO: Figure out how to mock this\n pass\n\n def test_recognise_command(self):\n # TODO: Figure out how to mock this\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |