| code (stringlengths 22–1.05M) | apis (listlengths 1–3.31k) | extract_api (stringlengths 75–3.25M) |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME>, 2017-2018
from flask_restful import Resource
from mobile.database import Database
from utils.data import rename_json_keys, camelcase_to_underscore
from utils.responses import Success, Error
from utils.validators import validate_json, value_exists
database = Database()
class MobileUpdateDataRoute(Resource):
headers = {'Location': '/update'}
resource_type = 'MobileUpdateData'
def _fail_on_deprecated_prompts(self, prompts):
for p in prompts:
            if 'uuid' not in p:
return Error(status_code=400,
headers=self.headers,
resource_type=self.resource_type,
errors=['Missing parameter (uuid): A prompt uuid must be supplied for each event.'])
    # This route is modeled on the legacy PHP mobile API; API v2 should
    # separate each call into its own route.
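    # A POST body for this route is assumed to look roughly like the following
    # (hypothetical sample; the key names come from the validations dict below):
    # {
    #     "uuid": "<user uuid>",
    #     "survey": {...},
    #     "coordinates": [...],
    #     "prompts": [...],
    #     "cancelledPrompts": [...]
    # }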
def post(self):
validations = [{
'uuid': {
'key': 'uuid',
'validator': value_exists,
'response': Error,
'error': 'UUID must be supplied. No action taken.'
},
'survey_answers': {
'key': 'survey',
'validator': None
},
'coordinates': {
'key': 'coordinates',
'validator': None
},
'prompts_answers': {
'key': 'prompts',
'validator': None
},
'cancelled_prompts': {
'key': 'cancelledPrompts',
'validator': None
}
}]
validated = validate_json(validations, self.headers, self.resource_type)
user = database.user.find_by_uuid(validated['uuid'])
if user:
survey_answers, coordinates, prompts_answers, cancelled_prompts = None, None, None, None
response = {
'survey': 'No new survey data supplied.',
'coordinates': 'No new coordinates data supplied.',
'prompts': 'No new prompt answers supplied.',
'cancelledPrompts': 'No cancelled prompts supplied.'
}
if validated['survey_answers']:
survey_answers = database.survey.upsert(user=user,
answers=validated['survey_answers'])
if survey_answers:
response['survey'] = 'Survey answer for {} upserted.'.format(user.uuid)
if validated['coordinates']:
coordinates = rename_json_keys(validated['coordinates'], camelcase_to_underscore)
coordinates = database.coordinates.insert(user=user,
coordinates=coordinates)
if coordinates:
response['coordinates'] = (
'New coordinates for {} inserted.'.format(user.uuid))
# upsert prompts answers and remove any existing conflicting cancelled prompt responses
if validated['prompts_answers']:
formatted_prompts = rename_json_keys(validated['prompts_answers'], camelcase_to_underscore)
error = self._fail_on_deprecated_prompts(formatted_prompts)
if error:
return error
prompts_answers = database.prompts.upsert(user=user,
prompts=formatted_prompts)
prompts_uuids = {p.prompt_uuid for p in prompts_answers}
database.cancelled_prompts.delete(prompts_uuids)
if prompts_answers:
response['prompts'] = (
'New prompt answers for {} inserted.'.format(user.uuid))
# filter cancelled prompts which conflict with a provided response by uuid and insert cancelled prompts
if validated['cancelled_prompts']:
formatted_cancelled_prompts = rename_json_keys(validated['cancelled_prompts'], camelcase_to_underscore)
                # fail gracefully on older versions of the mobile app that do not provide a prompt uuid
error = self._fail_on_deprecated_prompts(formatted_cancelled_prompts)
if error:
return error
if validated['prompts_answers']:
answers_uuids = {p['uuid'] for p in validated['prompts_answers']}
filtered_cancelled_prompts = []
for c in formatted_cancelled_prompts:
if c['uuid'] not in answers_uuids:
filtered_cancelled_prompts.append(c)
else:
filtered_cancelled_prompts = formatted_cancelled_prompts
cancelled_prompts = database.cancelled_prompts.insert(user=user,
cancelled_prompts=filtered_cancelled_prompts)
if cancelled_prompts:
response['cancelledPrompts'] = (
'New cancelled prompts for {} inserted.'.format(user.uuid))
status = None
if any([survey_answers, coordinates, prompts_answers, cancelled_prompts]):
database.commit()
status = 201
else:
status = 200
# add in deprecation warning for v1 api
return Success(status_code=status,
headers=self.headers,
resource_type=self.resource_type,
status='Warning (deprecated): API v1 will soon be phased out. Please refer to documentation for v2 calls.',
body=response)
return Error(status_code=410,
headers=self.headers,
resource_type=self.resource_type,
errors=['Could not find survey for {}.'.format(validated['uuid'])])
|
[
"utils.data.rename_json_keys",
"mobile.database.Database",
"utils.validators.validate_json",
"utils.responses.Success",
"utils.responses.Error"
] |
[((316, 326), 'mobile.database.Database', 'Database', ([], {}), '()\n', (324, 326), False, 'from mobile.database import Database\n'), ((1712, 1772), 'utils.validators.validate_json', 'validate_json', (['validations', 'self.headers', 'self.resource_type'], {}), '(validations, self.headers, self.resource_type)\n', (1725, 1772), False, 'from utils.validators import validate_json, value_exists\n'), ((5550, 5771), 'utils.responses.Success', 'Success', ([], {'status_code': 'status', 'headers': 'self.headers', 'resource_type': 'self.resource_type', 'status': '"""Warning (deprecated): API v1 will soon be phased out. Please refer to documentation for v2 calls."""', 'body': 'response'}), "(status_code=status, headers=self.headers, resource_type=self.\n resource_type, status=\n 'Warning (deprecated): API v1 will soon be phased out. Please refer to documentation for v2 calls.'\n , body=response)\n", (5557, 5771), False, 'from utils.responses import Success, Error\n'), ((579, 757), 'utils.responses.Error', 'Error', ([], {'status_code': '(400)', 'headers': 'self.headers', 'resource_type': 'self.resource_type', 'errors': "['Missing parameter (uuid): A prompt uuid must be supplied for each event.']"}), "(status_code=400, headers=self.headers, resource_type=self.\n resource_type, errors=[\n 'Missing parameter (uuid): A prompt uuid must be supplied for each event.']\n )\n", (584, 757), False, 'from utils.responses import Success, Error\n'), ((2651, 2718), 'utils.data.rename_json_keys', 'rename_json_keys', (["validated['coordinates']", 'camelcase_to_underscore'], {}), "(validated['coordinates'], camelcase_to_underscore)\n", (2667, 2718), False, 'from utils.data import rename_json_keys, camelcase_to_underscore\n'), ((3223, 3294), 'utils.data.rename_json_keys', 'rename_json_keys', (["validated['prompts_answers']", 'camelcase_to_underscore'], {}), "(validated['prompts_answers'], camelcase_to_underscore)\n", (3239, 3294), False, 'from utils.data import rename_json_keys, camelcase_to_underscore\n'), ((4094, 4167), 'utils.data.rename_json_keys', 'rename_json_keys', (["validated['cancelled_prompts']", 'camelcase_to_underscore'], {}), "(validated['cancelled_prompts'], camelcase_to_underscore)\n", (4110, 4167), False, 'from utils.data import rename_json_keys, camelcase_to_underscore\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150421_2037'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wod_rules', '0011_auto_20150421_1653'),
('characters', '0017_auto_20150424_1354'),
]
operations = [
migrations.CreateModel(
name='MortalCharacter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text="Your character's real name.", unique=True, max_length=50, verbose_name='name')),
('xp', models.IntegerField(null=True, verbose_name='Experience points', blank=True)),
('intelligence', models.IntegerField(default=1, verbose_name='Intelligence', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('wits', models.IntegerField(default=1, verbose_name='Wits', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('resolve', models.IntegerField(default=1, verbose_name='Resolve', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('strength', models.IntegerField(default=1, verbose_name='Strength', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('dexterity', models.IntegerField(default=1, verbose_name='Dexterity', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('stamina', models.IntegerField(default=1, verbose_name='Stamina', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('presence', models.IntegerField(default=1, verbose_name='Presence', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('manipulation', models.IntegerField(default=1, verbose_name='Manipulation', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('composure', models.IntegerField(default=1, verbose_name='Composure', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('description', models.TextField(help_text='A description of your character.', null=True, verbose_name='Description', blank=True)),
('age', models.IntegerField(verbose_name='Age')),
('type', models.CharField(default=b'mortal', max_length=25, verbose_name='Character type', choices=[(b'mortal', b'Mortal'), (b'mage', b'Mage')])),
('is_npc', models.BooleanField(default=False)),
('health', models.IntegerField(null=True, verbose_name='Maximum Health', blank=True)),
('willpower', models.IntegerField(null=True, verbose_name='Willpower', blank=True)),
('size', models.IntegerField(default=5, verbose_name='Size')),
('speed', models.IntegerField(null=True, verbose_name='Speed', blank=True)),
('initiative', models.IntegerField(null=True, verbose_name='Initiative modifier', blank=True)),
('defense', models.IntegerField(null=True, verbose_name='Defense', blank=True)),
('armor', models.IntegerField(null=True, verbose_name='Armor', blank=True)),
('academics', models.IntegerField(default=0, null=True, verbose_name='Academics', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('computer', models.IntegerField(default=0, null=True, verbose_name='Computer', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('crafts', models.IntegerField(default=0, null=True, verbose_name='Crafts', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('investigation', models.IntegerField(default=0, null=True, verbose_name='Investigation', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('medicine', models.IntegerField(default=0, null=True, verbose_name='Medicine', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('occult', models.IntegerField(default=0, null=True, verbose_name='Occult', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('politics', models.IntegerField(default=0, null=True, verbose_name='Politics', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('science', models.IntegerField(default=0, null=True, verbose_name='Science', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('athletics', models.IntegerField(default=0, null=True, verbose_name='Athletics', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('brawl', models.IntegerField(default=0, null=True, verbose_name='Brawl', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('drive', models.IntegerField(default=0, null=True, verbose_name='Drive', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('firearms', models.IntegerField(default=0, null=True, verbose_name='Firearms', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('larceny', models.IntegerField(default=0, null=True, verbose_name='Larceny', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('stealth', models.IntegerField(default=0, null=True, verbose_name='Stealth', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('survival', models.IntegerField(default=0, null=True, verbose_name='Survival', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('weaponry', models.IntegerField(default=0, null=True, verbose_name='Weaponry', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('animal_ken', models.IntegerField(default=0, null=True, verbose_name='Animal_ken', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('empathy', models.IntegerField(default=0, null=True, verbose_name='Empathy', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('expression', models.IntegerField(default=0, null=True, verbose_name='Expression', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('intimidation', models.IntegerField(default=0, null=True, verbose_name='Intimidation', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('persuasion', models.IntegerField(default=0, null=True, verbose_name='Persuasion', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('socialize', models.IntegerField(default=0, null=True, verbose_name='Socialize', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('streetwise', models.IntegerField(default=0, null=True, verbose_name='Streetwise', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('subterfuge', models.IntegerField(default=0, null=True, verbose_name='Subterfuge', blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
('morality', models.IntegerField(default=7, verbose_name='Morality', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7'), (8, b'8'), (9, b'9'), (10, b'10')])),
],
options={
'ordering': ('-name',),
'verbose_name': 'mortal',
'verbose_name_plural': 'mortals',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MortalNPC',
fields=[
('mortalcharacter_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='characters.MortalCharacter')),
('player_description', models.TextField(help_text='Players can add their own description for this character.', null=True, verbose_name='Description', blank=True)),
('gm_notes', models.TextField(help_text='Special notes visible only to the GM', null=True, verbose_name='Description', blank=True)),
],
options={
'verbose_name_plural': "mortal npc's",
},
bases=('characters.mortalcharacter',),
),
migrations.AddField(
model_name='mortalcharacter',
name='campaign',
field=models.ForeignKey(to='users.Campaign'),
preserve_default=True,
),
migrations.AddField(
model_name='mortalcharacter',
name='player',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='mortalcharacter',
name='vice',
field=models.ForeignKey(to='wod_rules.Vice'),
preserve_default=True,
),
migrations.AddField(
model_name='mortalcharacter',
name='virtue',
field=models.ForeignKey(to='wod_rules.Virtue'),
preserve_default=True,
),
migrations.AlterField(
model_name='magecharacter',
name='campaign',
field=models.ForeignKey(to='users.Campaign'),
preserve_default=True,
),
]
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((256, 313), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (287, 313), False, 'from django.db import models, migrations\n'), ((9273, 9311), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""users.Campaign"""'}), "(to='users.Campaign')\n", (9290, 9311), False, 'from django.db import models, migrations\n'), ((9475, 9521), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'settings.AUTH_USER_MODEL'}), '(to=settings.AUTH_USER_MODEL)\n', (9492, 9521), False, 'from django.db import models, migrations\n'), ((9683, 9721), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""wod_rules.Vice"""'}), "(to='wod_rules.Vice')\n", (9700, 9721), False, 'from django.db import models, migrations\n'), ((9885, 9925), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""wod_rules.Virtue"""'}), "(to='wod_rules.Virtue')\n", (9902, 9925), False, 'from django.db import models, migrations\n'), ((10091, 10129), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""users.Campaign"""'}), "(to='users.Campaign')\n", (10108, 10129), False, 'from django.db import models, migrations\n'), ((554, 647), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (570, 647), False, 'from django.db import models, migrations\n'), ((671, 781), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Your character\'s real name."""', 'unique': '(True)', 'max_length': '(50)', 'verbose_name': '"""name"""'}), '(help_text="Your character\'s real name.", unique=True,\n max_length=50, verbose_name=\'name\')\n', (687, 781), False, 'from django.db import models, migrations\n'), ((803, 879), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Experience points"""', 'blank': '(True)'}), "(null=True, verbose_name='Experience points', blank=True)\n", (822, 879), False, 'from django.db import models, migrations\n'), ((915, 1054), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Intelligence"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Intelligence', choices=[(0,\n b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (934, 1054), False, 'from django.db import models, migrations\n'), ((1078, 1209), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Wits"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Wits', choices=[(0, b'0'), (1,\n b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (1097, 1209), False, 'from django.db import models, migrations\n'), ((1236, 1370), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Resolve"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Resolve', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (1255, 1370), False, 'from django.db import models, migrations\n'), ((1398, 1533), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Strength"""', 'choices': 
"[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Strength', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (1417, 1533), False, 'from django.db import models, migrations\n'), ((1562, 1698), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Dexterity"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Dexterity', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (1581, 1698), False, 'from django.db import models, migrations\n'), ((1725, 1859), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Stamina"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Stamina', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (1744, 1859), False, 'from django.db import models, migrations\n'), ((1887, 2022), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Presence"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Presence', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (1906, 2022), False, 'from django.db import models, migrations\n'), ((2054, 2193), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Manipulation"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Manipulation', choices=[(0,\n b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (2073, 2193), False, 'from django.db import models, migrations\n'), ((2222, 2358), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)', 'verbose_name': '"""Composure"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=1, verbose_name='Composure', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (2241, 2358), False, 'from django.db import models, migrations\n'), ((2389, 2506), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""A description of your character."""', 'null': '(True)', 'verbose_name': '"""Description"""', 'blank': '(True)'}), "(help_text='A description of your character.', null=True,\n verbose_name='Description', blank=True)\n", (2405, 2506), False, 'from django.db import models, migrations\n'), ((2529, 2568), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Age"""'}), "(verbose_name='Age')\n", (2548, 2568), False, 'from django.db import models, migrations\n'), ((2596, 2736), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'mortal'", 'max_length': '(25)', 'verbose_name': '"""Character type"""', 'choices': "[(b'mortal', b'Mortal'), (b'mage', b'Mage')]"}), "(default=b'mortal', max_length=25, verbose_name=\n 'Character type', choices=[(b'mortal', b'Mortal'), (b'mage', b'Mage')])\n", (2612, 2736), False, 'from django.db import models, migrations\n'), ((2761, 2795), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2780, 2795), False, 'from django.db import models, migrations\n'), ((2825, 2898), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 
'verbose_name': '"""Maximum Health"""', 'blank': '(True)'}), "(null=True, verbose_name='Maximum Health', blank=True)\n", (2844, 2898), False, 'from django.db import models, migrations\n'), ((2931, 2999), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Willpower"""', 'blank': '(True)'}), "(null=True, verbose_name='Willpower', blank=True)\n", (2950, 2999), False, 'from django.db import models, migrations\n'), ((3027, 3078), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(5)', 'verbose_name': '"""Size"""'}), "(default=5, verbose_name='Size')\n", (3046, 3078), False, 'from django.db import models, migrations\n'), ((3107, 3171), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Speed"""', 'blank': '(True)'}), "(null=True, verbose_name='Speed', blank=True)\n", (3126, 3171), False, 'from django.db import models, migrations\n'), ((3205, 3283), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Initiative modifier"""', 'blank': '(True)'}), "(null=True, verbose_name='Initiative modifier', blank=True)\n", (3224, 3283), False, 'from django.db import models, migrations\n'), ((3314, 3380), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Defense"""', 'blank': '(True)'}), "(null=True, verbose_name='Defense', blank=True)\n", (3333, 3380), False, 'from django.db import models, migrations\n'), ((3409, 3473), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'verbose_name': '"""Armor"""', 'blank': '(True)'}), "(null=True, verbose_name='Armor', blank=True)\n", (3428, 3473), False, 'from django.db import models, migrations\n'), ((3506, 3671), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Academics"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Academics', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (3525, 3671), False, 'from django.db import models, migrations\n'), ((3693, 3857), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Computer"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Computer', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (3712, 3857), False, 'from django.db import models, migrations\n'), ((3877, 4033), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Crafts"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Crafts', blank=True,\n choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (3896, 4033), False, 'from django.db import models, migrations\n'), ((4066, 4233), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Investigation"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Investigation',\n blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), 
(4,\n b'4'), (5, b'5')])\n", (4085, 4233), False, 'from django.db import models, migrations\n'), ((4257, 4421), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Medicine"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Medicine', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (4276, 4421), False, 'from django.db import models, migrations\n'), ((4441, 4597), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Occult"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Occult', blank=True,\n choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (4460, 4597), False, 'from django.db import models, migrations\n'), ((4625, 4789), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Politics"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Politics', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (4644, 4789), False, 'from django.db import models, migrations\n'), ((4810, 4973), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Science"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Science', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (4829, 4973), False, 'from django.db import models, migrations\n'), ((4996, 5161), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Athletics"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Athletics', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (5015, 5161), False, 'from django.db import models, migrations\n'), ((5180, 5335), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Brawl"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Brawl', blank=True,\n choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (5199, 5335), False, 'from django.db import models, migrations\n'), ((5360, 5515), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Drive"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Drive', blank=True,\n choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])\n", (5379, 5515), False, 'from django.db import models, migrations\n'), ((5543, 5707), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Firearms"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, 
b'5')]"}), "(default=0, null=True, verbose_name='Firearms', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (5562, 5707), False, 'from django.db import models, migrations\n'), ((5728, 5891), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Larceny"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Larceny', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (5747, 5891), False, 'from django.db import models, migrations\n'), ((5912, 6075), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Stealth"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Stealth', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (5931, 6075), False, 'from django.db import models, migrations\n'), ((6097, 6261), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Survival"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Survival', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (6116, 6261), False, 'from django.db import models, migrations\n'), ((6283, 6447), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Weaponry"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Weaponry', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (6302, 6447), False, 'from django.db import models, migrations\n'), ((6471, 6637), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Animal_ken"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Animal_ken', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (6490, 6637), False, 'from django.db import models, migrations\n'), ((6658, 6821), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Empathy"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Empathy', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (6677, 6821), False, 'from django.db import models, migrations\n'), ((6845, 7011), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Expression"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Expression', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (6864, 7011), False, 'from django.db import models, migrations\n'), ((7037, 7203), 'django.db.models.IntegerField', 'models.IntegerField', ([], 
{'default': '(0)', 'null': '(True)', 'verbose_name': '"""Intimidation"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Intimidation',\n blank=True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4,\n b'4'), (5, b'5')])\n", (7056, 7203), False, 'from django.db import models, migrations\n'), ((7229, 7395), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Persuasion"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Persuasion', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (7248, 7395), False, 'from django.db import models, migrations\n'), ((7418, 7583), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Socialize"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Socialize', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (7437, 7583), False, 'from django.db import models, migrations\n'), ((7607, 7773), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Streetwise"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Streetwise', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (7626, 7773), False, 'from django.db import models, migrations\n'), ((7797, 7963), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)', 'verbose_name': '"""Subterfuge"""', 'blank': '(True)', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]"}), "(default=0, null=True, verbose_name='Subterfuge', blank=\n True, choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (\n 5, b'5')])\n", (7816, 7963), False, 'from django.db import models, migrations\n'), ((7985, 8181), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(7)', 'verbose_name': '"""Morality"""', 'choices': "[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'\n ), (7, b'7'), (8, b'8'), (9, b'9'), (10, b'10')]"}), "(default=7, verbose_name='Morality', choices=[(0, b'0'),\n (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7,\n b'7'), (8, b'8'), (9, b'9'), (10, b'10')])\n", (8004, 8181), False, 'from django.db import models, migrations\n'), ((8529, 8658), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""characters.MortalCharacter"""'}), "(parent_link=True, auto_created=True, primary_key=True,\n serialize=False, to='characters.MortalCharacter')\n", (8549, 8658), False, 'from django.db import models, migrations\n'), ((8696, 8843), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Players can add their own description for this character."""', 'null': '(True)', 'verbose_name': '"""Description"""', 'blank': '(True)'}), "(help_text=\n 'Players can add their own description for this character.', null=True,\n verbose_name='Description', blank=True)\n", (8712, 
8843), False, 'from django.db import models, migrations\n'), ((8866, 8988), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Special notes visible only to the GM"""', 'null': '(True)', 'verbose_name': '"""Description"""', 'blank': '(True)'}), "(help_text='Special notes visible only to the GM', null=\n True, verbose_name='Description', blank=True)\n", (8882, 8988), False, 'from django.db import models, migrations\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
dataset=pd.read_csv(r'C:\Users\santosh\Downloads\PAASBAAN-crime-prediction-master\data.csv')
data=pd.read_csv(r'C:\Users\santosh\Downloads\PAASBAAN-crime-prediction-master\data.csv')
print(dataset.head())
for col in data:
print (type(data[col][1]))
# parse timestamps in a single pass with the explicit day-first format
data['timestamp'] = pd.to_datetime(data['timestamp'], format='%d/%m/%Y %H:%M:%S')
data['timestamp']
column_1 = data.iloc[:, 0]  # .ix has been removed from pandas; use positional .iloc
db=pd.DataFrame({"year": column_1.dt.year,
"month": column_1.dt.month,
"day": column_1.dt.day,
"hour": column_1.dt.hour,
"dayofyear": column_1.dt.dayofyear,
"week": column_1.dt.week,
"weekofyear": column_1.dt.weekofyear,
"dayofweek": column_1.dt.dayofweek,
"weekday": column_1.dt.weekday,
"quarter": column_1.dt.quarter,
})
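# Note: dt.week / dt.weekofyear and dt.dayofweek / dt.weekday are aliases, so the
# frame above carries duplicated features; on pandas >= 2.0 the week accessors are
# removed and column_1.dt.isocalendar().week would be needed instead.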
dataset1=dataset.drop('timestamp',axis=1)
data1=pd.concat([db,dataset1],axis=1)
data1.info()
data1.dropna(inplace=True)
data1.head()
sns.pairplot(data1,hue='act363')
|
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.to_datetime",
"seaborn.pairplot",
"pandas.concat"
] |
[((101, 199), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\santosh\\\\Downloads\\\\PAASBAAN-crime-prediction-master\\\\data.csv"""'], {}), "(\n 'C:\\\\Users\\\\santosh\\\\Downloads\\\\PAASBAAN-crime-prediction-master\\\\data.csv'\n )\n", (112, 199), True, 'import pandas as pd\n'), ((191, 289), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\santosh\\\\Downloads\\\\PAASBAAN-crime-prediction-master\\\\data.csv"""'], {}), "(\n 'C:\\\\Users\\\\santosh\\\\Downloads\\\\PAASBAAN-crime-prediction-master\\\\data.csv'\n )\n", (202, 289), True, 'import pandas as pd\n'), ((366, 399), 'pandas.to_datetime', 'pd.to_datetime', (["data['timestamp']"], {}), "(data['timestamp'])\n", (380, 399), True, 'import pandas as pd\n'), ((420, 481), 'pandas.to_datetime', 'pd.to_datetime', (["data['timestamp']"], {'format': '"""%d/%m/%Y %H:%M:%S"""'}), "(data['timestamp'], format='%d/%m/%Y %H:%M:%S')\n", (434, 481), True, 'import pandas as pd\n'), ((530, 867), 'pandas.DataFrame', 'pd.DataFrame', (["{'year': column_1.dt.year, 'month': column_1.dt.month, 'day': column_1.dt.\n day, 'hour': column_1.dt.hour, 'dayofyear': column_1.dt.dayofyear,\n 'week': column_1.dt.week, 'weekofyear': column_1.dt.weekofyear,\n 'dayofweek': column_1.dt.dayofweek, 'weekday': column_1.dt.weekday,\n 'quarter': column_1.dt.quarter}"], {}), "({'year': column_1.dt.year, 'month': column_1.dt.month, 'day':\n column_1.dt.day, 'hour': column_1.dt.hour, 'dayofyear': column_1.dt.\n dayofyear, 'week': column_1.dt.week, 'weekofyear': column_1.dt.\n weekofyear, 'dayofweek': column_1.dt.dayofweek, 'weekday': column_1.dt.\n weekday, 'quarter': column_1.dt.quarter})\n", (542, 867), True, 'import pandas as pd\n'), ((1039, 1072), 'pandas.concat', 'pd.concat', (['[db, dataset1]'], {'axis': '(1)'}), '([db, dataset1], axis=1)\n', (1048, 1072), True, 'import pandas as pd\n'), ((1125, 1158), 'seaborn.pairplot', 'sns.pairplot', (['data1'], {'hue': '"""act363"""'}), "(data1, hue='act363')\n", (1137, 1158), True, 'import seaborn as sns\n')]
|
"""
Copyright (c) Jupyter Development Team.
Distributed under the terms of the Modified BSD License.
"""
import os
import json
import os.path as osp
from jupyter_server.base.handlers import JupyterHandler, FileFindHandler
from jupyter_server.extension.handler import ExtensionHandlerMixin, ExtensionHandlerJinjaMixin
from jupyterlab_server import LabServerApp, LabConfig
from jupyter_server.utils import url_path_join as ujoin
from traitlets import Unicode
HERE = osp.dirname(__file__)
with open(os.path.join(HERE, 'package.json')) as fid:
version = json.load(fid)['version']
def _jupyter_server_extension_points():
return [
{
'module': __name__,
'app': ExampleApp
}
]
class ExampleHandler(
ExtensionHandlerJinjaMixin,
ExtensionHandlerMixin,
JupyterHandler
):
"""Handle requests between the main app page and notebook server."""
def get(self):
"""Get the main page for the application's interface."""
config_data = {
# Use camelCase here, since that's what the lab components expect
"appVersion": version,
'baseUrl': self.base_url,
'token': self.settings['token'],
'fullStaticUrl': ujoin(self.base_url, 'static', self.name),
'frontendUrl': ujoin(self.base_url, 'example/')
}
return self.write(
self.render_template(
'index.html',
static=self.static_url,
base_url=self.base_url,
token=self.settings['token'],
page_config=config_data
)
)
class ExampleApp(LabServerApp):
extension_url = '/example'
default_url = '/example'
app_url = "/example"
name = __name__
load_other_extensions = False
app_name = 'JupyterLab Example Service'
app_settings_dir = os.path.join(HERE, 'build', 'application_settings')
app_version = version
schemas_dir = os.path.join(HERE, 'build', 'schemas')
static_dir = os.path.join(HERE, 'build')
templates_dir = os.path.join(HERE, 'templates')
themes_dir = os.path.join(HERE, 'build', 'themes')
user_settings_dir = os.path.join(HERE, 'build', 'user_settings')
workspaces_dir = os.path.join(HERE, 'build', 'workspaces')
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list.
"""
self.handlers.append(
('/example', ExampleHandler)
)
if __name__ == '__main__':
ExampleApp.launch_instance()
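# Usage sketch (assumed): because of the __main__ guard above, running this module
# directly (e.g. `python path/to/this_file.py`, hypothetical filename) starts the
# example server and serves the rendered template under <base_url>/example.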
|
[
"os.path.dirname",
"json.load",
"os.path.join",
"jupyter_server.utils.url_path_join"
] |
[((466, 487), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (477, 487), True, 'import os.path as osp\n'), ((1882, 1933), 'os.path.join', 'os.path.join', (['HERE', '"""build"""', '"""application_settings"""'], {}), "(HERE, 'build', 'application_settings')\n", (1894, 1933), False, 'import os\n'), ((1978, 2016), 'os.path.join', 'os.path.join', (['HERE', '"""build"""', '"""schemas"""'], {}), "(HERE, 'build', 'schemas')\n", (1990, 2016), False, 'import os\n'), ((2034, 2061), 'os.path.join', 'os.path.join', (['HERE', '"""build"""'], {}), "(HERE, 'build')\n", (2046, 2061), False, 'import os\n'), ((2082, 2113), 'os.path.join', 'os.path.join', (['HERE', '"""templates"""'], {}), "(HERE, 'templates')\n", (2094, 2113), False, 'import os\n'), ((2131, 2168), 'os.path.join', 'os.path.join', (['HERE', '"""build"""', '"""themes"""'], {}), "(HERE, 'build', 'themes')\n", (2143, 2168), False, 'import os\n'), ((2193, 2237), 'os.path.join', 'os.path.join', (['HERE', '"""build"""', '"""user_settings"""'], {}), "(HERE, 'build', 'user_settings')\n", (2205, 2237), False, 'import os\n'), ((2259, 2300), 'os.path.join', 'os.path.join', (['HERE', '"""build"""', '"""workspaces"""'], {}), "(HERE, 'build', 'workspaces')\n", (2271, 2300), False, 'import os\n'), ((499, 533), 'os.path.join', 'os.path.join', (['HERE', '"""package.json"""'], {}), "(HERE, 'package.json')\n", (511, 533), False, 'import os\n'), ((557, 571), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (566, 571), False, 'import json\n'), ((1240, 1281), 'jupyter_server.utils.url_path_join', 'ujoin', (['self.base_url', '"""static"""', 'self.name'], {}), "(self.base_url, 'static', self.name)\n", (1245, 1281), True, 'from jupyter_server.utils import url_path_join as ujoin\n'), ((1310, 1342), 'jupyter_server.utils.url_path_join', 'ujoin', (['self.base_url', '"""example/"""'], {}), "(self.base_url, 'example/')\n", (1315, 1342), True, 'from jupyter_server.utils import url_path_join as ujoin\n')]
|
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = 'svgrepodl',
author="callan",
author_email="<EMAIL>",
description="Pack Downloader for SVG REPO",
long_description=long_description,
long_description_content_type="text/markdown",
    url="https://github.com/AllanCerveaux/svg_repo_dl/blob/master/README.md",  # 'url' is the setuptools keyword for the project homepage
version = '0.0.1',
license="MIT",
py_modules=['svgrepodl'],
install_requires = [
'selenium',
'progress',
'click',
'colored',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points = {
'console_scripts': [
'svgrepodl = svgrepodl.__main__:main'
]
}
)
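# Usage sketch (assumed): after `pip install .`, the console_scripts entry point
# above exposes an `svgrepodl` command that dispatches to svgrepodl.__main__:main.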
|
[
"setuptools.setup"
] |
[((99, 739), 'setuptools.setup', 'setup', ([], {'name': '"""svgrepodl"""', 'author': '"""callan"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Pack Downloader for SVG REPO"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'homepage': '"""https://github.com/AllanCerveaux/svg_repo_dl/blob/master/README.md"""', 'version': '"""0.0.1"""', 'license': '"""MIT"""', 'py_modules': "['svgrepodl']", 'install_requires': "['selenium', 'progress', 'click', 'colored']", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']", 'entry_points': "{'console_scripts': ['svgrepodl = svgrepodl.__main__:main']}"}), "(name='svgrepodl', author='callan', author_email='<EMAIL>',\n description='Pack Downloader for SVG REPO', long_description=\n long_description, long_description_content_type='text/markdown',\n homepage=\n 'https://github.com/AllanCerveaux/svg_repo_dl/blob/master/README.md',\n version='0.0.1', license='MIT', py_modules=['svgrepodl'],\n install_requires=['selenium', 'progress', 'click', 'colored'],\n classifiers=['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'], entry_points={'console_scripts':\n ['svgrepodl = svgrepodl.__main__:main']})\n", (104, 739), False, 'from setuptools import setup\n')]
|
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import cv2
import numpy as np
from PIL import Image
import math
class Erosion2d(nn.Layer):
"""
Erosion2d
"""
def __init__(self, m=1):
super(Erosion2d, self).__init__()
self.m = m
self.pad = [m, m, m, m]
def forward(self, x):
batch_size, c, h, w = x.shape
x_pad = F.pad(x, pad=self.pad, mode='constant', value=1e9)
channel = nn.functional.unfold(x_pad, 2 * self.m + 1, strides=1, paddings=0).reshape([batch_size, c, -1, h, w])
result = paddle.min(channel, axis=2)
return result
class Dilation2d(nn.Layer):
"""
Dilation2d
"""
def __init__(self, m=1):
super(Dilation2d, self).__init__()
self.m = m
self.pad = [m, m, m, m]
def forward(self, x):
batch_size, c, h, w = x.shape
x_pad = F.pad(x, pad=self.pad, mode='constant', value=-1e9)
channel = nn.functional.unfold(x_pad, 2 * self.m + 1, strides=1, paddings=0).reshape([batch_size, c, -1, h, w])
result = paddle.max(channel, axis=2)
return result
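# Usage sketch (assumed): both layers operate on NCHW tensors, e.g.
#   erode = Erosion2d(m=1)
#   out = erode(paddle.ones([1, 1, 8, 8]))  # min over each 3x3 neighbourhood
# Dilation2d works the same way but takes the max over each neighbourhood.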
def param2stroke(param, H, W, meta_brushes):
"""
param2stroke
"""
b = param.shape[0]
param_list = paddle.split(param, 8, axis=1)
x0, y0, w, h, theta = [item.squeeze(-1) for item in param_list[:5]]
sin_theta = paddle.sin(math.pi * theta)
cos_theta = paddle.cos(math.pi * theta)
index = paddle.full((b,), -1, dtype='int64').numpy()
index[(h > w).numpy()] = 0
index[(h <= w).numpy()] = 1
meta_brushes_resize = F.interpolate(meta_brushes, (H, W)).numpy()
brush = paddle.to_tensor(meta_brushes_resize[index])
warp_00 = cos_theta / w
warp_01 = sin_theta * H / (W * w)
warp_02 = (1 - 2 * x0) * cos_theta / w + (1 - 2 * y0) * sin_theta * H / (W * w)
warp_10 = -sin_theta * W / (H * h)
warp_11 = cos_theta / h
warp_12 = (1 - 2 * y0) * cos_theta / h - (1 - 2 * x0) * sin_theta * W / (H * h)
warp_0 = paddle.stack([warp_00, warp_01, warp_02], axis=1)
warp_1 = paddle.stack([warp_10, warp_11, warp_12], axis=1)
warp = paddle.stack([warp_0, warp_1], axis=1)
    grid = nn.functional.affine_grid(warp, [b, 3, H, W])  # note: paddle's and torch's default values here are the opposite of each other
brush = nn.functional.grid_sample(brush, grid)
return brush
def read_img(img_path, img_type='RGB', h=None, w=None):
"""
read img
"""
img = Image.open(img_path).convert(img_type)
if h is not None and w is not None:
img = img.resize((w, h), resample=Image.NEAREST)
img = np.array(img)
if img.ndim == 2:
img = np.expand_dims(img, axis=-1)
    img = img.transpose((2, 0, 1))  # array dimensions go from (X, Y, Z) -> (Z, X, Y)
    img = paddle.to_tensor(img).unsqueeze(0).astype('float32') / 255.  # convert the array to a normalized tensor
return img
def preprocess(img, w=512, h=512):
    image = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST)
image = image.transpose((2, 0, 1))
image = paddle.to_tensor(image).unsqueeze(0).astype('float32') / 255.
return image
def pad(img, H, W):
b, c, h, w = img.shape
pad_h = (H - h) // 2
pad_w = (W - w) // 2
remainder_h = (H - h) % 2
remainder_w = (W - w) % 2
expand_img = nn.functional.pad(img, [pad_w, pad_w + remainder_w,
pad_h, pad_h + remainder_h])
return expand_img
|
[
"paddle.to_tensor",
"paddle.nn.functional.affine_grid",
"paddle.nn.functional.grid_sample",
"paddle.sin",
"paddle.stack",
"paddle.nn.functional.unfold",
"numpy.expand_dims",
"paddle.cos",
"PIL.Image.open",
"paddle.nn.functional.pad",
"paddle.min",
"paddle.full",
"numpy.array",
"paddle.max",
"paddle.nn.functional.interpolate",
"paddle.split",
"cv2.resize"
] |
[((1255, 1285), 'paddle.split', 'paddle.split', (['param', '(8)'], {'axis': '(1)'}), '(param, 8, axis=1)\n', (1267, 1285), False, 'import paddle\n'), ((1374, 1401), 'paddle.sin', 'paddle.sin', (['(math.pi * theta)'], {}), '(math.pi * theta)\n', (1384, 1401), False, 'import paddle\n'), ((1418, 1445), 'paddle.cos', 'paddle.cos', (['(math.pi * theta)'], {}), '(math.pi * theta)\n', (1428, 1445), False, 'import paddle\n'), ((1649, 1693), 'paddle.to_tensor', 'paddle.to_tensor', (['meta_brushes_resize[index]'], {}), '(meta_brushes_resize[index])\n', (1665, 1693), False, 'import paddle\n'), ((2009, 2058), 'paddle.stack', 'paddle.stack', (['[warp_00, warp_01, warp_02]'], {'axis': '(1)'}), '([warp_00, warp_01, warp_02], axis=1)\n', (2021, 2058), False, 'import paddle\n'), ((2072, 2121), 'paddle.stack', 'paddle.stack', (['[warp_10, warp_11, warp_12]'], {'axis': '(1)'}), '([warp_10, warp_11, warp_12], axis=1)\n', (2084, 2121), False, 'import paddle\n'), ((2133, 2171), 'paddle.stack', 'paddle.stack', (['[warp_0, warp_1]'], {'axis': '(1)'}), '([warp_0, warp_1], axis=1)\n', (2145, 2171), False, 'import paddle\n'), ((2183, 2228), 'paddle.nn.functional.affine_grid', 'nn.functional.affine_grid', (['warp', '[b, 3, H, W]'], {}), '(warp, [b, 3, H, W])\n', (2208, 2228), True, 'import paddle.nn as nn\n'), ((2264, 2302), 'paddle.nn.functional.grid_sample', 'nn.functional.grid_sample', (['brush', 'grid'], {}), '(brush, grid)\n', (2289, 2302), True, 'import paddle.nn as nn\n'), ((2564, 2577), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2572, 2577), True, 'import numpy as np\n'), ((2855, 2897), 'cv2.resize', 'cv2.resize', (['img', '(w, h)', 'cv2.INTER_NEAREST'], {}), '(img, (w, h), cv2.INTER_NEAREST)\n', (2865, 2897), False, 'import cv2\n'), ((3203, 3288), 'paddle.nn.functional.pad', 'nn.functional.pad', (['img', '[pad_w, pad_w + remainder_w, pad_h, pad_h + remainder_h]'], {}), '(img, [pad_w, pad_w + remainder_w, pad_h, pad_h + remainder_h]\n )\n', (3220, 3288), True, 'import paddle.nn as nn\n'), ((395, 454), 'paddle.nn.functional.pad', 'F.pad', (['x'], {'pad': 'self.pad', 'mode': '"""constant"""', 'value': '(1000000000.0)'}), "(x, pad=self.pad, mode='constant', value=1000000000.0)\n", (400, 454), True, 'import paddle.nn.functional as F\n'), ((583, 610), 'paddle.min', 'paddle.min', (['channel'], {'axis': '(2)'}), '(channel, axis=2)\n', (593, 610), False, 'import paddle\n'), ((897, 957), 'paddle.nn.functional.pad', 'F.pad', (['x'], {'pad': 'self.pad', 'mode': '"""constant"""', 'value': '(-1000000000.0)'}), "(x, pad=self.pad, mode='constant', value=-1000000000.0)\n", (902, 957), True, 'import paddle.nn.functional as F\n'), ((1086, 1113), 'paddle.max', 'paddle.max', (['channel'], {'axis': '(2)'}), '(channel, axis=2)\n', (1096, 1113), False, 'import paddle\n'), ((2614, 2642), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(-1)'}), '(img, axis=-1)\n', (2628, 2642), True, 'import numpy as np\n'), ((1458, 1494), 'paddle.full', 'paddle.full', (['(b,)', '(-1)'], {'dtype': '"""int64"""'}), "((b,), -1, dtype='int64')\n", (1469, 1494), False, 'import paddle\n'), ((1593, 1628), 'paddle.nn.functional.interpolate', 'F.interpolate', (['meta_brushes', '(H, W)'], {}), '(meta_brushes, (H, W))\n', (1606, 1628), True, 'import paddle.nn.functional as F\n'), ((2418, 2438), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2428, 2438), False, 'from PIL import Image\n'), ((464, 530), 'paddle.nn.functional.unfold', 'nn.functional.unfold', (['x_pad', '(2 * self.m + 1)'], {'strides': '(1)', 'paddings': 
'(0)'}), '(x_pad, 2 * self.m + 1, strides=1, paddings=0)\n', (484, 530), True, 'import paddle.nn as nn\n'), ((967, 1033), 'paddle.nn.functional.unfold', 'nn.functional.unfold', (['x_pad', '(2 * self.m + 1)'], {'strides': '(1)', 'paddings': '(0)'}), '(x_pad, 2 * self.m + 1, strides=1, paddings=0)\n', (987, 1033), True, 'import paddle.nn as nn\n'), ((2720, 2741), 'paddle.to_tensor', 'paddle.to_tensor', (['img'], {}), '(img)\n', (2736, 2741), False, 'import paddle\n'), ((2949, 2972), 'paddle.to_tensor', 'paddle.to_tensor', (['image'], {}), '(image)\n', (2965, 2972), False, 'import paddle\n')]
|
import sys
import traceback
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import T
from gremlin_python.process.traversal import Order
from gremlin_python.process.traversal import Cardinality
from gremlin_python.process.traversal import Column
from gremlin_python.process.traversal import Direction
from gremlin_python.process.traversal import Operator
from gremlin_python.process.traversal import P
from gremlin_python.process.traversal import Pop
from gremlin_python.process.traversal import Scope
from gremlin_python.process.traversal import Barrier
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
statics.load_statics(globals())
graph = Graph()
try:
# 8182 is the default gremlin Server port
g = graph.traversal().withRemote(DriverRemoteConnection('ws://localhost:8182/gremlin','g'))
except Exception as e:
print ('Connection to gremlin server failed. Connection error stack trace:')
print (traceback.format_exc())
sys.exit(-1)
# g.V().drop().iterate()
# g.E().drop().iterate()
# print (g.V().count().next())
import csv
# The skill domain and skill vertices are stored in a file which needs to be loaded into the
# Graph DB to form the Knowledge Graph
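# A vertex row in skill_vertex.csv is assumed to look something like (hypothetical sample):
#   skill,Python
# where column 0 becomes the vertex label and column 1 its 'name' property.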
try:
skillreader = csv.reader(open('skill_vertex.csv'), delimiter=',')
except Exception as e:
print ('Failed to load Vertex CSV File "skill_vertex.csv".')
exit(-1)
# Remove any stray whitespace characters when reading the skill names from the CSV files
skillreader = [(row[0].strip(), row[1].strip()) for row in skillreader]
for row in skillreader:
# See if the skill node is present already
count = g.V().has('name', row[1]).count().next()
# print (count > 1)
if count == 0:
# print ('Adding vertex ' + row[1])
g.addV(row[0]).property('name', row[1]).next()
if count > 0:
print ('Node already present with count: ' + row[1] + ' ' + str(count))
print ("*" * 76)
print (" " * 25 + "Vertex loading complete")
print ("*" * 76)
# The relationships between nodes are stored as an edge list in a CSV file where the
# first column has the originating node name and the second column has the destination
# node name
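# An edge row in skill_edges.csv is assumed to look something like (hypothetical sample):
#   Python,Data Analysis
# which creates a 'related' edge pointing from the 'Python' vertex to the 'Data Analysis' vertex.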
try:
skillreader = csv.reader(open('skill_edges.csv'), delimiter=',')
except Exception as e:
print ('Failed to load Vertex CSV File "skill_edges.csv".')
exit(-1)
skillreader = [(row[0].strip(), row[1].strip()) for row in skillreader]
for row in skillreader:
# See if the node is present already
for idx in [0,1]:
count = g.V().has('name', row[idx]).count().next()
if count != 1:
            # Ensure each endpoint vertex exists exactly once before adding an edge between them
            print ('Vertex missing or duplicated: ' + row[idx])
sys.exit(-1)
left = g.V().has('name', row[0]).next().id
right = g.V().has('name', row[1]).next().id
count = g.V(left).out('related').hasId(right).count().next()
if count == 0:
print ('Adding edge from ' + row[0] + " to " + row[1])
g.V(right).as_('r').V(left).addE('related').to('r').toList()
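        # The step label 'r' marks the destination vertex, so the edge runs from row[0] to row[1].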
print ()
print ("*" * 76)
print (" " * 25 + "Edge creation complete")
print ("*" * 76)
|
[
"gremlin_python.structure.graph.Graph",
"traceback.format_exc",
"gremlin_python.driver.driver_remote_connection.DriverRemoteConnection",
"sys.exit"
] |
[((853, 860), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (858, 860), False, 'from gremlin_python.structure.graph import Graph\n'), ((950, 1008), 'gremlin_python.driver.driver_remote_connection.DriverRemoteConnection', 'DriverRemoteConnection', (['"""ws://localhost:8182/gremlin"""', '"""g"""'], {}), "('ws://localhost:8182/gremlin', 'g')\n", (972, 1008), False, 'from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection\n'), ((1152, 1164), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1160, 1164), False, 'import sys\n'), ((1124, 1146), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1144, 1146), False, 'import traceback\n'), ((2950, 2962), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2958, 2962), False, 'import sys\n')]
|
#!/usr/bin/env python
# Copyright 2012-2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, re
import pdf_decanter
from optparse import OptionParser
op = OptionParser(usage = "%prog [options] <filename.pdf>")
op.add_option("--no-opengl", action = "store_false",
dest = "use_opengl", default = True,
help = "disable OpenGL for rendering (default: use OpenGL)")
op.add_option("--size", "-s", default = '1024x768',
help = "set target rendering / window size in pixels")
op.add_option("--cache", action = "store_true",
dest = "create_cache", default = False,
help = "use caching, create new cache if necessary (default: use if present, but don't create cache file)")
op.add_option("--ignore-cache", action = "store_false",
dest = "use_cache", default = None,
help = "ignore cache file (even if it seems to be up-to-date)")
op.add_option("--no-gui", action = "store_false",
dest = "show_gui", default = True,
help = "skip main GUI (use for benchmarking / cache generation)")
op.add_option("--profile", action = "store_true",
help = "enable profiling (and dump to 'pdf_decanter.prof')")
options, args = op.parse_args()
pdfFilename, = args
ma = re.match('([0-9]+)[ x*,/]([0-9]+)', options.size)
if not ma:
    sys.stderr.write('ERROR: Could not parse size argument %r; expected format like 1024x768\n' % options.size)
sys.exit(1)
slideSize = list(map(int, ma.groups()))
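# e.g. --size 1024x768 (separators " ", "x", "*", "," or "/" are accepted) gives slideSize == [1024, 768]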
g = pdf_decanter.start(show = options.show_gui, slideSize = slideSize)
if options.use_opengl and options.show_gui:
g.enableGL()
if options.profile:
import cProfile
pr = cProfile.Profile()
pr.enable()
g.loadPDF(pdfFilename,
useCache = options.use_cache,
createCache = options.create_cache)
if options.profile:
pr.disable()
pr.dump_stats('pdf_decanter.prof')
pixelCount = g._slides.pixelCount()
sw, sh = g.slideSize() # _slides[0].sizeF()
rawCount = g._slides.frameCount() * sw * sh
print("%d pixels out of %d retained. (%.1f%%)" % (pixelCount, rawCount, 100.0 * pixelCount / rawCount))
if options.show_gui and not g.hadEventLoop:
from pdf_decanter.dynqt import QtWidgets
sys.exit(QtWidgets.qApp.exec_())
|
[
"optparse.OptionParser",
"re.match",
"cProfile.Profile",
"pdf_decanter.start",
"pdf_decanter.dynqt.QtWidgets.qApp.exec_",
"sys.stderr.write",
"sys.exit"
] |
[((694, 746), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""%prog [options] <filename.pdf>"""'}), "(usage='%prog [options] <filename.pdf>')\n", (706, 746), False, 'from optparse import OptionParser\n'), ((1820, 1869), 're.match', 're.match', (['"""([0-9]+)[ x*,/]([0-9]+)"""', 'options.size'], {}), "('([0-9]+)[ x*,/]([0-9]+)', options.size)\n", (1828, 1869), False, 'import sys, re\n'), ((2039, 2101), 'pdf_decanter.start', 'pdf_decanter.start', ([], {'show': 'options.show_gui', 'slideSize': 'slideSize'}), '(show=options.show_gui, slideSize=slideSize)\n', (2057, 2101), False, 'import pdf_decanter\n'), ((1885, 1982), 'sys.stderr.write', 'sys.stderr.write', (['"""ERROR: Could not parse size argument %r; expected format like 1024x768\n"""'], {}), "(\n 'ERROR: Could not parse size argument %r; expected format like 1024x768\\n')\n", (1901, 1982), False, 'import sys, re\n'), ((1982, 1993), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1990, 1993), False, 'import sys, re\n'), ((2218, 2236), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (2234, 2236), False, 'import cProfile\n'), ((2772, 2794), 'pdf_decanter.dynqt.QtWidgets.qApp.exec_', 'QtWidgets.qApp.exec_', ([], {}), '()\n', (2792, 2794), False, 'from pdf_decanter.dynqt import QtWidgets\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""\
[python] 8ebed2c.py [-achimrtuv] [-e compiler] [-f format] [-p pedigree]
(in.8ebed|@testprog) (out.c|-)
8ebed2c.py: A compiler (to C) for the Eightebed programming language.
Language version 1.1. Implementation version 2021.0621.
The @testprog syntax can be used to acquire input from the
specified attribute of the Tests class of the tests module.
Using a single hyphen for the output filename will send
the generated C source to stdout.\
"""
import logging
import sys
from optparse import OptionParser
from eightebed import tests, context, rooibos
from eightebed.drivers import parse_and_gen, compile_and_run, cmdline
logger = logging.getLogger("main")
def main(argv):
optparser = OptionParser(__doc__)
optparser.add_option("-a", "--dump-ast",
action="store_true", dest="dump_ast", default=False,
help="dump AST after source is parsed")
optparser.add_option("-c", "--compile",
action="store_true", dest="compile", default=False,
help="compile generated C code")
optparser.add_option("-e", "--c-compiler", metavar='EXECUTABLE',
dest="compiler", default="gcc",
help="specify program to use for compiling C "
"(default: %default)")
optparser.add_option("-f", "--pointer-format", metavar='FORMAT',
dest="pointer_format", default="$%08lx",
help="printf format to use for pointers in "
"--trace-marking (default: %default)")
optparser.add_option("-i", "--interactive",
action="store_true", dest="interactive",
default=False,
help="enter interactive mode")
optparser.add_option("-m", "--trace-marking",
action="store_true", dest="trace_marking",
default=False,
help="trace marking actions in generated C source")
optparser.add_option("-p", "--pedigree",
dest="pedigree", default=__file__,
help="entity to list as creator of generated C "
"source (default: %default)")
optparser.add_option("-r", "--run",
action="store_true", dest="run", default=False,
help="run compiled program (implies --compile)")
optparser.add_option("-t", "--test",
action="store_true", dest="test", default=False,
help="run test cases and exit")
optparser.add_option("-u", "--clean",
action="store_true", dest="clean", default=False,
help="delete generated C source and executable")
optparser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="produce extra status output")
(options, args) = optparser.parse_args(argv[1:])
if options.verbose:
logging.basicConfig(level=logging.INFO)
if options.run:
options.compile = True
if options.test:
import doctest
(f1, smth) = doctest.testmod(rooibos)
(f2, smth) = doctest.testmod(context)
(f3, smth) = doctest.testmod(tests)
if f1 + f2 + f3 == 0:
sys.exit(0)
else:
sys.exit(1)
if options.interactive:
cmdline(options)
sys.exit(0)
try:
infilename = args[0]
outfilename = args[1]
except IndexError:
print("Usage: {}\n".format(__doc__))
print("Run with the -h option to see a list of all options.")
sys.exit(1)
parse_and_gen(options, infilename, outfilename, tests=tests.Tests)
if options.compile:
result = compile_and_run(outfilename, options)
sys.stdout.write(result)
if __name__ == "__main__":
main(sys.argv)
|
[
"sys.stdout.write",
"eightebed.drivers.parse_and_gen",
"logging.basicConfig",
"optparse.OptionParser",
"doctest.testmod",
"eightebed.drivers.compile_and_run",
"eightebed.drivers.cmdline",
"sys.exit",
"logging.getLogger"
] |
[((717, 742), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (734, 742), False, 'import logging\n'), ((777, 798), 'optparse.OptionParser', 'OptionParser', (['__doc__'], {}), '(__doc__)\n', (789, 798), False, 'from optparse import OptionParser\n'), ((3853, 3919), 'eightebed.drivers.parse_and_gen', 'parse_and_gen', (['options', 'infilename', 'outfilename'], {'tests': 'tests.Tests'}), '(options, infilename, outfilename, tests=tests.Tests)\n', (3866, 3919), False, 'from eightebed.drivers import parse_and_gen, compile_and_run, cmdline\n'), ((3187, 3226), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3206, 3226), False, 'import logging\n'), ((3343, 3367), 'doctest.testmod', 'doctest.testmod', (['rooibos'], {}), '(rooibos)\n', (3358, 3367), False, 'import doctest\n'), ((3389, 3413), 'doctest.testmod', 'doctest.testmod', (['context'], {}), '(context)\n', (3404, 3413), False, 'import doctest\n'), ((3435, 3457), 'doctest.testmod', 'doctest.testmod', (['tests'], {}), '(tests)\n', (3450, 3457), False, 'import doctest\n'), ((3586, 3602), 'eightebed.drivers.cmdline', 'cmdline', (['options'], {}), '(options)\n', (3593, 3602), False, 'from eightebed.drivers import parse_and_gen, compile_and_run, cmdline\n'), ((3611, 3622), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3619, 3622), False, 'import sys\n'), ((3961, 3998), 'eightebed.drivers.compile_and_run', 'compile_and_run', (['outfilename', 'options'], {}), '(outfilename, options)\n', (3976, 3998), False, 'from eightebed.drivers import parse_and_gen, compile_and_run, cmdline\n'), ((4007, 4031), 'sys.stdout.write', 'sys.stdout.write', (['result'], {}), '(result)\n', (4023, 4031), False, 'import sys\n'), ((3500, 3511), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3508, 3511), False, 'import sys\n'), ((3538, 3549), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3546, 3549), False, 'import sys\n'), ((3837, 3848), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3845, 3848), False, 'import sys\n')]
|
import pylab, optparse, os
#opt parse arguments, type advect_ini.dat, advect_fin.dat, and norm.dat
#---------------------------------------------------------------------------------
# Not in use now
parser = optparse.OptionParser()
opts, args = parser.parse_args()
#Method that runs the advection program in C for a certain method, initial conditions
# and Nx. It makes a plot of the initial condition, analytical and estimated solutions,
# and it also reports the computed L1 norm in the plot title.
#----------------------------------------------------------------------------
def advect(initial, initialName, method, methodName, Nx):
    #Initialize parameter file
#--------------------------------------------------------
filename = "param.cfg"
pfile = open(filename, 'w')
pfile.write("Nx = %d \n" % Nx)
pfile.write("x1 = 1.0 \n")
pfile.write("x0 = -1.0 \n")
pfile.write("CFL = 0.5 \n")
pfile.write("a = 1.0 \n")
pfile.write("t_max = 2.0 \n ")
pfile.write("init = %d \n " % initial)
pfile.write("meth = %d \n" % method)
pfile.close()
# run c program
#-----------------------------
os.system("./advect_dbr250")
#recover the calculation of the norm
#-----------------------------------
normFile = open("norm.dat", 'r')
lines = normFile.readlines()
norm = float(lines[0])
Nx = int(lines[1])
#Plot results
#-------------------------------------------------------
pylab.title("Advection of a %s wave using the %s method \n The L1_norm is calculated to be: %f" % (initialName ,methodName, norm))
plotTitles = ["Initial Condition","Analytical Solution", "Estimated Solution"]
plotFiles = ["advect_ini.dat", "advect_ana.dat", "advect_fin.dat"]
for infile_title,infile_name in zip(plotTitles,plotFiles):
infile = open(infile_name)
lines = infile.readlines()
strvals = [l.strip().split() for l in lines]
x = [float(v[0]) for v in strvals]
y = [float(v[1]) for v in strvals]
pylab.plot(x,y, label = infile_title)
pylab.xlabel("x (with Nx = %d )" % Nx )
pylab.ylabel("u")
pylab.legend()
#----------------------------------------------------------
#Method that runs the advection program in C for a certain method, initial conditions
# and a list of Nx values. It makes a log-log plot of the convergence and reports
# the fitted slope in the legend
#----------------------------------------------------------------------------
def advectConv(initial, initialName, method, methodName, Nxs):
L1 = list()
Nx = list()
for Nxvalue in Nxs:
        #Initialize parameter file
#--------------------------------------------------------
filename = "param.cfg"
pfile = open(filename, 'w')
pfile.write("Nx = %d \n" % Nxvalue)
pfile.write("x1 = 1.0 \n")
pfile.write("x0 = -1.0 \n")
pfile.write("CFL = 0.5 \n")
pfile.write("a = 1.0 \n")
pfile.write("t_max = 2.0 \n ")
pfile.write("init = %d \n " % initial)
pfile.write("meth = %d \n" % method)
pfile.close()
# run c program
#-----------------------------
os.system("./advect_dbr250")
#recover the calculation of the norm
#-----------------------------------
normFile = open("norm.dat", 'r')
lines = normFile.readlines()
norm = float(lines[0])
#append result onto list of norms and nx values
#---------------------------------
L1.append(norm)
Nx.append(Nxvalue)
#Calculate slope of convergence
#---------------------------------------------------------------
slope = (pylab.log(L1[-1])-pylab.log(L1[1]))/(pylab.log(Nx[-1])-pylab.log(Nx[1]))
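    # slope estimates the exponent p in L1 ~ C * Nx**p, using the 2nd and last points of the
    # log-log data; a negative slope means the error decreases as Nx grows.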
#Plot Results
#---------------------------------------------------------------
pylab.loglog(Nx,L1,'-o', label = "%s with slope= %f" % (methodName, slope))
pylab.xlabel("Nx")
pylab.ylabel("L1_norm")
pylab.legend()
#Run advection for Square wave and Gaussian with both methods
#-------------------------------------------------------------------
pylab.figure()
initialConditions = ["Gaussian","Square"]
methodNames = ["Lax-Friedrichs", "Lax-Wendroff"]
Nx = 40
count = 1
for j in range(0,2):
for i in range(0,2):
pylab.subplot(2,2,count)
advect(i+1,initialConditions[i],j+1,methodNames[j], Nx)
count = count +1
#Run advection and plot convergence for both with both methods
#-------------------------------------------------------------------
Nxs = [10,30,100,300,1000,3000]
initialConditions = ["Gaussian","Square"]
methodNames = ["Lax-Friedrichs", "Lax-Wendroff"]
pylab.figure()
for j in range(0,2):
pylab.subplot(2,1,j+1)
pylab.title("Convergence rates for %s wave" % initialConditions[j])
for i in range(0,2):
advectConv(j+1,initialConditions[j],i+1,methodNames[i], Nxs)
pylab.show()
|
[
"pylab.title",
"pylab.show",
"optparse.OptionParser",
"pylab.ylabel",
"os.system",
"pylab.loglog",
"pylab.plot",
"pylab.log",
"pylab.subplot",
"pylab.figure",
"pylab.xlabel",
"pylab.legend"
] |
[((210, 233), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (231, 233), False, 'import pylab, optparse, os\n'), ((4162, 4176), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (4174, 4176), False, 'import pylab, optparse, os\n'), ((4727, 4741), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (4739, 4741), False, 'import pylab, optparse, os\n'), ((4966, 4978), 'pylab.show', 'pylab.show', ([], {}), '()\n', (4976, 4978), False, 'import pylab, optparse, os\n'), ((1085, 1113), 'os.system', 'os.system', (['"""./advect_dbr250"""'], {}), "('./advect_dbr250')\n", (1094, 1113), False, 'import pylab, optparse, os\n'), ((1407, 1550), 'pylab.title', 'pylab.title', (['("""Advection of a %s wave using the %s method \n The L1_norm is calculated to be: %f"""\n % (initialName, methodName, norm))'], {}), '(\n """Advection of a %s wave using the %s method \n The L1_norm is calculated to be: %f"""\n % (initialName, methodName, norm))\n', (1418, 1550), False, 'import pylab, optparse, os\n'), ((3854, 3929), 'pylab.loglog', 'pylab.loglog', (['Nx', 'L1', '"""-o"""'], {'label': "('%s with slope= %f' % (methodName, slope))"}), "(Nx, L1, '-o', label='%s with slope= %f' % (methodName, slope))\n", (3866, 3929), False, 'import pylab, optparse, os\n'), ((3934, 3952), 'pylab.xlabel', 'pylab.xlabel', (['"""Nx"""'], {}), "('Nx')\n", (3946, 3952), False, 'import pylab, optparse, os\n'), ((3957, 3980), 'pylab.ylabel', 'pylab.ylabel', (['"""L1_norm"""'], {}), "('L1_norm')\n", (3969, 3980), False, 'import pylab, optparse, os\n'), ((3985, 3999), 'pylab.legend', 'pylab.legend', ([], {}), '()\n', (3997, 3999), False, 'import pylab, optparse, os\n'), ((4768, 4794), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(j + 1)'], {}), '(2, 1, j + 1)\n', (4781, 4794), False, 'import pylab, optparse, os\n'), ((4795, 4862), 'pylab.title', 'pylab.title', (["('Convergence rates for %s wave' % initialConditions[j])"], {}), "('Convergence rates for %s wave' % initialConditions[j])\n", (4806, 4862), False, 'import pylab, optparse, os\n'), ((2004, 2040), 'pylab.plot', 'pylab.plot', (['x', 'y'], {'label': 'infile_title'}), '(x, y, label=infile_title)\n', (2014, 2040), False, 'import pylab, optparse, os\n'), ((2051, 2089), 'pylab.xlabel', 'pylab.xlabel', (["('x (with Nx = %d )' % Nx)"], {}), "('x (with Nx = %d )' % Nx)\n", (2063, 2089), False, 'import pylab, optparse, os\n'), ((2099, 2116), 'pylab.ylabel', 'pylab.ylabel', (['"""u"""'], {}), "('u')\n", (2111, 2116), False, 'import pylab, optparse, os\n'), ((2126, 2140), 'pylab.legend', 'pylab.legend', ([], {}), '()\n', (2138, 2140), False, 'import pylab, optparse, os\n'), ((3172, 3200), 'os.system', 'os.system', (['"""./advect_dbr250"""'], {}), "('./advect_dbr250')\n", (3181, 3200), False, 'import pylab, optparse, os\n'), ((4342, 4368), 'pylab.subplot', 'pylab.subplot', (['(2)', '(2)', 'count'], {}), '(2, 2, count)\n', (4355, 4368), False, 'import pylab, optparse, os\n'), ((3690, 3707), 'pylab.log', 'pylab.log', (['L1[-1]'], {}), '(L1[-1])\n', (3699, 3707), False, 'import pylab, optparse, os\n'), ((3708, 3724), 'pylab.log', 'pylab.log', (['L1[1]'], {}), '(L1[1])\n', (3717, 3724), False, 'import pylab, optparse, os\n'), ((3727, 3744), 'pylab.log', 'pylab.log', (['Nx[-1]'], {}), '(Nx[-1])\n', (3736, 3744), False, 'import pylab, optparse, os\n'), ((3745, 3761), 'pylab.log', 'pylab.log', (['Nx[1]'], {}), '(Nx[1])\n', (3754, 3761), False, 'import pylab, optparse, os\n')]
|
import warnings
import parse
import glob
import pytorch_lightning as pl
import xxhash
import re
from nlstruct.registry import get_config
import torch
import os
import traceback
def flat_config(d):
if d is None:
return d
if isinstance(d, dict):
return tuple(sorted(((k, flat_config(v)) for k, v in sorted(d.items())), key=lambda x: x[0]))
elif isinstance(d, (list, tuple)) and len(d) > 0 and isinstance(d[0], dict):
return tuple((flat_config(v) for v in d))
elif isinstance(d, (list, tuple)):
return tuple(d)
elif isinstance(d, (int, bool, str, float)):
return d
else:
return str(d)
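# Illustrative example of the flattening (assumed inputs):
#   flat_config({"lr": 1e-3, "layers": [2, 3]}) -> (('layers', (2, 3)), ('lr', 0.001))
# The result is hashable and insensitive to key order, so it can be fed to xxhash below.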
def get_hashkey(model):
    return xxhash.xxh64(str(flat_config(model.hparams_initial))).hexdigest()
class AlreadyRunningException(Exception):
pass
class ModelCheckpoint(pl.callbacks.Callback):
def __init__(self, path, keep_n=1, only_last=False):
super().__init__()
if not (path.endswith('.ckpt') or path.endswith('.pt')):
path = path + ".ckpt"
assert keep_n is False or keep_n > 0
self.keep_n = keep_n
self.path = path
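        # "path" is used as a format string; fields such as {hashkey} and {global_step} are
        # substituted when checkpoints are saved, e.g. "ckpts/{hashkey}-step{global_step}.ckpt"
        # (an illustrative template, not one required by this class).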
self._all_logged_metrics = []
self.last_train_metrics = {}
self._hashkey = None
self.only_last = only_last
@property
def hashkey(self):
return self._hashkey
def list_paths(self, model):
if self._hashkey is None:
self._hashkey = get_hashkey(model)
glob_search = re.sub('{.*?}', '*', self.path.replace("{hashkey}", self._hashkey))
paths = glob.glob(glob_search, recursive=True)
parsed_paths = [(path, parse.parse(self.path, path)) for path in paths]
return parsed_paths
def lock_file_path(self, model):
if self._hashkey is None:
self._hashkey = get_hashkey(model)
lock_file = self.path.replace("{hashkey}", self._hashkey).format(global_step=0, current_epoch=0).replace(".ckpt", '.lock').replace(".pt", '.lock')
return lock_file
def on_fit_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', unused: 'Optional' = None):
parsed_paths = self.list_paths(pl_module)
finished_path = next(((path, r) for path, r in parsed_paths if r.named.get('global_step', 0) == pl_module.max_steps - 1), (None, None))[0]
if finished_path is not None:
pl_module._is_resuming_finished_model = True
else:
print("Will save checkpoints under path {}".format(self.path.replace("{hashkey}", self._hashkey)))
lock_file_path = self.lock_file_path(pl_module)
if os.path.exists(lock_file_path):
raise AlreadyRunningException("Found a lock file {} indicating that the experiment is already running.".format(lock_file_path))
else:
open(lock_file_path, 'a').close()
def on_fit_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', unused: 'Optional' = None):
pl_module._is_resuming_finished_model = False
def on_train_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', unused: 'Optional' = None):
lock_file_path = self.lock_file_path(pl_module)
if os.path.exists(lock_file_path):
os.remove(lock_file_path)
def on_train_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', unused: 'Optional' = None):
parsed_paths = self.list_paths(pl_module)
if not len(parsed_paths):
return
latest_path = max(parsed_paths, key=lambda r: r[1].named.get('global_step', 0))[0]
print("Resuming from {}".format(latest_path))
state = torch.load(latest_path)
pl_module._load_state(state)
if "all_logged_metrics" in state:
self._all_logged_metrics = state["all_logged_metrics"]
for log_dict in state["all_logged_metrics"]:
trainer.logger.log_metrics(log_dict)
def on_train_epoch_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', unused: 'Optional' = None):
if trainer.global_step == 0:
return
self._all_logged_metrics.append({**trainer.logged_metrics, "step": int(trainer.global_step) - 1, "epoch": int(trainer.current_epoch)})
state = pl_module._save_state(increment_step=True)
state["all_logged_metrics"] = self._all_logged_metrics
if self._hashkey is None:
self._hashkey = get_hashkey(pl_module)
save_path = self.path.format(
global_step=trainer.global_step - 1,
epoch=trainer.current_epoch,
hashkey=self._hashkey)
if not self.only_last or trainer.global_step == pl_module.max_steps:
torch.save(state, save_path + ".tmp")
os.rename(save_path + ".tmp", save_path)
parsed_paths = self.list_paths(pl_module)
if self.keep_n is not False:
for remove_path, _ in sorted(parsed_paths, key=lambda r: r[1].named.get('global_step', 0))[:-self.keep_n]:
os.remove(remove_path)
|
[
"os.remove",
"torch.load",
"os.rename",
"os.path.exists",
"torch.save",
"glob.glob",
"parse.parse"
] |
[((1641, 1679), 'glob.glob', 'glob.glob', (['glob_search'], {'recursive': '(True)'}), '(glob_search, recursive=True)\n', (1650, 1679), False, 'import glob\n'), ((3283, 3313), 'os.path.exists', 'os.path.exists', (['lock_file_path'], {}), '(lock_file_path)\n', (3297, 3313), False, 'import os\n'), ((3732, 3755), 'torch.load', 'torch.load', (['latest_path'], {}), '(latest_path)\n', (3742, 3755), False, 'import torch\n'), ((2696, 2726), 'os.path.exists', 'os.path.exists', (['lock_file_path'], {}), '(lock_file_path)\n', (2710, 2726), False, 'import os\n'), ((3327, 3352), 'os.remove', 'os.remove', (['lock_file_path'], {}), '(lock_file_path)\n', (3336, 3352), False, 'import os\n'), ((4792, 4829), 'torch.save', 'torch.save', (['state', "(save_path + '.tmp')"], {}), "(state, save_path + '.tmp')\n", (4802, 4829), False, 'import torch\n'), ((4842, 4882), 'os.rename', 'os.rename', (["(save_path + '.tmp')", 'save_path'], {}), "(save_path + '.tmp', save_path)\n", (4851, 4882), False, 'import os\n'), ((1711, 1739), 'parse.parse', 'parse.parse', (['self.path', 'path'], {}), '(self.path, path)\n', (1722, 1739), False, 'import parse\n'), ((5106, 5128), 'os.remove', 'os.remove', (['remove_path'], {}), '(remove_path)\n', (5115, 5128), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from libnmap.parser import NmapParser
rep = NmapParser.parse_fromfile("libnmap/test/files/full_sudo6.xml")
print(
"Nmap scan discovered {0}/{1} hosts up".format(
rep.hosts_up, rep.hosts_total
)
)
for _host in rep.hosts:
if _host.is_up():
print(
"+ Host: {0} {1}".format(_host.address, " ".join(_host.hostnames))
)
# get CPE from service if available
for s in _host.services:
print(
" Service: {0}/{1} ({2})".format(
s.port, s.protocol, s.state
)
)
# NmapService.cpelist returns an array of CPE objects
for _serv_cpe in s.cpelist:
print(" CPE: {0}".format(_serv_cpe.cpestring))
if _host.os_fingerprinted:
print(" OS Fingerprints")
for osm in _host.os.osmatches:
print(
" Found Match:{0} ({1}%)".format(osm.name, osm.accuracy)
)
# NmapOSMatch.get_cpe() method return an array of string
# unlike NmapOSClass.cpelist which returns an array of CPE obj
for cpe in osm.get_cpe():
print("\t CPE: {0}".format(cpe))
|
[
"libnmap.parser.NmapParser.parse_fromfile"
] |
[((92, 154), 'libnmap.parser.NmapParser.parse_fromfile', 'NmapParser.parse_fromfile', (['"""libnmap/test/files/full_sudo6.xml"""'], {}), "('libnmap/test/files/full_sudo6.xml')\n", (117, 154), False, 'from libnmap.parser import NmapParser\n')]
|
# reads the chunking log file chunking_pmids/chunking_set, extracts the PMID and FTP tarball path
# from each "INFO - Download <pmid> <ftp...tar.gz>" line, and splits them into chunks of 10000,
# writing a <date>_<chunk>.txt listing and a <date>_<chunk>.mv move script per chunk.
#
# python generate_chunk_files.py
from os import environ, path, makedirs
import logging
import logging.config
import re
from datetime import datetime
from dotenv import load_dotenv
load_dotenv()
log_file_path = path.join(path.dirname(path.abspath(__file__)), '../logging.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger('literature logger')
# base_path = '/home/azurebrd/git/agr_literature_service_demo/src/xml_processing/'
base_path = environ.get('XML_PATH', "")
process_path = base_path + 'chunking_pmids/'
def generate_chunk_files():
"""
:return:
"""
main_chunk_file = process_path + 'chunking_set'
chunk_to_pmid_to_ftp = dict()
current_chunk = 1
chunk_to_pmid_to_ftp[current_chunk] = dict()
with open(main_chunk_file) as main_file:
main_data = main_file.read()
main_split = main_data.split("\n")
count = 0
# tot_count = 0
for line in main_split:
pmid_re_output = re.search(r"INFO - Download (\d+) (ftp.+?tar.gz)", line)
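            # Matches log lines of the form "... INFO - Download <pmid> <ftp path ending in tar.gz>";
            # group 1 is the PMID and group 2 is the FTP tarball path.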
if pmid_re_output is not None:
pmid = pmid_re_output.group(1)
ftp = pmid_re_output.group(2)
count += 1
# tot_count += 1
# if tot_count > 10:
# continue
# if count > 3:
if count > 10000:
current_chunk = current_chunk + 1
chunk_to_pmid_to_ftp[current_chunk] = dict()
count = 1
chunk_to_pmid_to_ftp[current_chunk][pmid] = ftp
main_file.close()
now = datetime.now()
date = now.strftime("%Y%m%d")
# date = '20210426'
for chunk_number in chunk_to_pmid_to_ftp:
chunk_count_string = str(chunk_number)
if chunk_number < 10:
chunk_count_string = '0' + chunk_count_string
chunk_dir = process_path + 'pubmed_tgz_' + date + '_' + chunk_count_string
if not path.exists(chunk_dir):
makedirs(chunk_dir)
output_chunk_file = process_path + date + '_' + chunk_count_string + '.txt'
with open(output_chunk_file, "w") as output_fh:
for pmid in chunk_to_pmid_to_ftp[chunk_number]:
ftp = chunk_to_pmid_to_ftp[chunk_number][pmid]
output_fh.write("%s\t%s\n" % (pmid, ftp))
output_fh.close()
move_chunk_file = process_path + date + '_' + chunk_count_string + '.mv'
with open(move_chunk_file, "w") as move_fh:
for pmid in chunk_to_pmid_to_ftp[chunk_number]:
ftp = chunk_to_pmid_to_ftp[chunk_number][pmid]
move_fh.write("mv %spubmed_tgz/%s.tar.gz %s\n" % (base_path, pmid, chunk_dir))
move_fh.close()
if __name__ == "__main__":
"""
call main start function
"""
generate_chunk_files()
|
[
"os.path.abspath",
"os.makedirs",
"os.path.exists",
"datetime.datetime.now",
"dotenv.load_dotenv",
"os.environ.get",
"re.search",
"logging.config.fileConfig",
"logging.getLogger"
] |
[((354, 367), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (365, 367), False, 'from dotenv import load_dotenv\n'), ((452, 492), 'logging.config.fileConfig', 'logging.config.fileConfig', (['log_file_path'], {}), '(log_file_path)\n', (477, 492), False, 'import logging\n'), ((502, 540), 'logging.getLogger', 'logging.getLogger', (['"""literature logger"""'], {}), "('literature logger')\n", (519, 540), False, 'import logging\n'), ((638, 665), 'os.environ.get', 'environ.get', (['"""XML_PATH"""', '""""""'], {}), "('XML_PATH', '')\n", (649, 665), False, 'from os import environ, path, makedirs\n'), ((1794, 1808), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1806, 1808), False, 'from datetime import datetime\n'), ((408, 430), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (420, 430), False, 'from os import environ, path, makedirs\n'), ((1157, 1213), 're.search', 're.search', (['"""INFO - Download (\\\\d+) (ftp.+?tar.gz)"""', 'line'], {}), "('INFO - Download (\\\\d+) (ftp.+?tar.gz)', line)\n", (1166, 1213), False, 'import re\n'), ((2147, 2169), 'os.path.exists', 'path.exists', (['chunk_dir'], {}), '(chunk_dir)\n', (2158, 2169), False, 'from os import environ, path, makedirs\n'), ((2183, 2202), 'os.makedirs', 'makedirs', (['chunk_dir'], {}), '(chunk_dir)\n', (2191, 2202), False, 'from os import environ, path, makedirs\n')]
|
#!/usr/bin/python3
import os
import re
import subprocess
def run(args, output_file_name):
with open(output_file_name, 'w') as output_file:
subprocess.run(['python3'] + args, check=True, universal_newlines=True, stdout=output_file)
run(['./processstatsruns.py', 'runsperinningstats'], 'runsperinning.xml')
run(['./processballsstrikesstatsruns.py', 'runsperinningballsstrikesstats'], 'runsperinningballsstrikes.xml')
try:
os.mkdir('statsruns')
except FileExistsError:
# directory already exists
pass
file_name_re = re.compile(r'runsperinningstatscumulative\.(\d+)')
file_name_balls_strikes_re = re.compile(r'runsperinningballsstrikesstatscumulative\.(\d+)')
stats_years_dir = os.path.abspath('statsyears')
for file_name in os.listdir(stats_years_dir):
match = file_name_re.match(file_name)
if match:
year = match.group(1)
output_file_name = os.path.join(os.path.abspath('statsruns'), 'runsperinningcumulative' + year + '.xml')
run(['./processstatsruns.py', os.path.join(os.path.abspath('statsyears'), file_name)], output_file_name)
if False:
print(year)
balls_strikes_match = file_name_balls_strikes_re.match(file_name)
if balls_strikes_match:
year = balls_strikes_match.group(1)
output_file_name = os.path.join(os.path.abspath('statsruns'), 'runsperinningballsstrikescumulative' + year + '.xml')
run(['./processballsstrikesstatsruns.py', os.path.join(os.path.abspath('statsyears'), file_name)], output_file_name)
if False:
print(year)
|
[
"os.mkdir",
"os.path.abspath",
"subprocess.run",
"os.listdir",
"re.compile"
] |
[((539, 590), 're.compile', 're.compile', (['"""runsperinningstatscumulative\\\\.(\\\\d+)"""'], {}), "('runsperinningstatscumulative\\\\.(\\\\d+)')\n", (549, 590), False, 'import re\n'), ((618, 681), 're.compile', 're.compile', (['"""runsperinningballsstrikesstatscumulative\\\\.(\\\\d+)"""'], {}), "('runsperinningballsstrikesstatscumulative\\\\.(\\\\d+)')\n", (628, 681), False, 'import re\n'), ((698, 727), 'os.path.abspath', 'os.path.abspath', (['"""statsyears"""'], {}), "('statsyears')\n", (713, 727), False, 'import os\n'), ((745, 772), 'os.listdir', 'os.listdir', (['stats_years_dir'], {}), '(stats_years_dir)\n', (755, 772), False, 'import os\n'), ((438, 459), 'os.mkdir', 'os.mkdir', (['"""statsruns"""'], {}), "('statsruns')\n", (446, 459), False, 'import os\n'), ((152, 247), 'subprocess.run', 'subprocess.run', (["(['python3'] + args)"], {'check': '(True)', 'universal_newlines': '(True)', 'stdout': 'output_file'}), "(['python3'] + args, check=True, universal_newlines=True,\n stdout=output_file)\n", (166, 247), False, 'import subprocess\n'), ((900, 928), 'os.path.abspath', 'os.path.abspath', (['"""statsruns"""'], {}), "('statsruns')\n", (915, 928), False, 'import os\n'), ((1310, 1338), 'os.path.abspath', 'os.path.abspath', (['"""statsruns"""'], {}), "('statsruns')\n", (1325, 1338), False, 'import os\n'), ((1024, 1053), 'os.path.abspath', 'os.path.abspath', (['"""statsyears"""'], {}), "('statsyears')\n", (1039, 1053), False, 'import os\n'), ((1458, 1487), 'os.path.abspath', 'os.path.abspath', (['"""statsyears"""'], {}), "('statsyears')\n", (1473, 1487), False, 'import os\n')]
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" tests of the Parameters class """
# pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access
import unittest
import traceback
import armi
from armi.reactor import parameters
from armi.reactor import composites
class MockComposite:
def __init__(self, name):
self.name = name
self.p = {}
class MockCompositeGrandParent(MockComposite):
pass
class MockCompositeParent(MockCompositeGrandParent):
pass
class MockCompositeChild(MockCompositeParent):
pass
class ParameterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.defs = parameters.ALL_DEFINITIONS._paramDefs
@classmethod
def tearDownClass(cls):
parameters.ALL_DEFINITIONS._paramDefs = cls.defs
def setUp(self):
parameters.ALL_DEFINITIONS._paramDefs = []
def test_mutableDefaultsNotSupported(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
with self.assertRaises(AssertionError):
pb.defParam("units", "description", "location", default=[])
with self.assertRaises(AssertionError):
pb.defParam("units", "description", "location", default={})
with self.assertRaises(AssertionError):
fail = pDefs.createBuilder(default=[])
with self.assertRaises(AssertionError):
fail = pDefs.createBuilder(default={})
def test_paramPropertyDoesNotConflict(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("doodle", "units", "description", "location", default=42)
with pDefs.createBuilder(MockComposite, default=0.0) as pb:
pb.defParam("cheese", "kg", "pressed curds of milk", "avg")
pb.defParam("fudge", "kg", "saturated chocolate", "avg", default=19)
pb.defParam(
"noodles",
"kg",
"strip, ring, or tube of pasta",
"avg",
default=None,
)
mock1 = Mock()
mock2 = Mock()
self.assertEqual(42, mock1.doodle)
self.assertEqual(42, mock2.doodle)
self.assertEqual(0.0, mock1.cheese) # make sure factory default is applied
self.assertEqual(
19, mock2.fudge
) # make sure we can override the factory default
self.assertEqual(
None, mock2.noodles
) # make sure we can override the factory default
mock1.doodle = 17
self.assertEqual(17, mock1.doodle)
self.assertEqual(42, mock2.doodle)
def test_paramPropertyDoesNotConflictWithNoneDefault(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam(
"noneDefault", "units", "description", "location", default=None
)
mock1 = Mock()
mock2 = Mock()
self.assertIsNone(mock1.noneDefault)
self.assertIsNone(mock2.noneDefault)
mock1.noneDefault = 1.234
self.assertEqual(1.234, mock1.noneDefault)
self.assertEqual(None, mock2.noneDefault)
def test_getWithoutDefaultRaisesParameterError(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("noDefault", "units", "description", "location")
mock = Mock()
with self.assertRaises(parameters.ParameterError):
print(mock.noDefault)
def test_attemptingToSetParamWithoutSetterFails(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam(
"noSetter",
"noSetter",
"units",
"description",
"location",
default="encapsulated",
setter=None,
)
mock = Mock()
self.assertEqual("encapsulated", mock.noSetter)
with self.assertRaises(parameters.ParameterError):
mock.noSetter = False
self.assertEqual("encapsulated", mock.noSetter)
def test_setter(self):
class Mock(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
def n(self, value):
self._p_n = value
self._p_nPlus1 = value + 1
pb.defParam("n", "units", "description", "location", setter=n)
def nPlus1(self, value):
self._p_nPlus1 = value
self._p_n = value - 1
pb.defParam("nPlus1", "units", "description", "location", setter=nPlus1)
mock = Mock()
self.assertTrue(
all(
pd.assigned == parameters.NEVER
for pd in mock.paramDefs
if pd.name != "serialNum"
)
)
with self.assertRaises(parameters.ParameterError):
print(mock.n)
with self.assertRaises(parameters.ParameterError):
print(mock.nPlus1)
mock.n = 15
self.assertEqual(15, mock.n)
self.assertEqual(16, mock.nPlus1)
mock.nPlus1 = 22
self.assertEqual(21, mock.n)
self.assertEqual(22, mock.nPlus1)
self.assertTrue(all(pd.assigned for pd in mock.paramDefs))
def test_cannotDefineParameterWithSameName(self):
with self.assertRaises(parameters.ParameterDefinitionError):
class MockParamCollection(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 1", "location")
pb.defParam("sameName", "units", "description 2", "location")
_ = MockParamCollection()
def test_paramDefinitionsCompose(self):
class MockBaseParamCollection(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("base1", "units", "a param on the base collection", "avg")
pb.defParam(
"base2", "units", "another param on the base collection", "avg"
)
class MockDerivedACollection(MockBaseParamCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("derAp1", "units", "derived a p 1", "centroid")
pb.defParam("derAp2", "units", "derived a p 2", "centroid")
class MockDerivedBCollection(MockDerivedACollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("derBp", "units", "derived b param", "centroid")
base = MockBaseParamCollection()
derA = MockDerivedACollection()
derB = MockDerivedBCollection()
self.assertTrue(
set(base.paramDefs._paramDefs).issubset(set(derA.paramDefs._paramDefs))
)
self.assertTrue(
set(base.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs))
)
self.assertTrue(
set(derA.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs))
)
def test_cannotDefineParameterWithSameNameForCollectionSubclass(self):
class MockPCParent(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 3", "location")
with self.assertRaises(parameters.ParameterDefinitionError):
class MockPCChild(MockPCParent):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 4", "location")
_ = MockPCChild()
# same name along a different branch from the base ParameterCollection should be fine
class MockPCUncle(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("sameName", "units", "description 5", "location")
def test_cannotCreateAttrbuteOnParameterCollectionSubclass(self):
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("someParam", "units", "description", "location")
_ = MockPC()
def test_cannotCreateInstanceOf_NoDefault(self):
with self.assertRaises(NotImplementedError):
_ = parameters.NoDefault()
def test_cannotCreateInstanceOf_Undefined(self):
with self.assertRaises(NotImplementedError):
_ = parameters.parameterDefinitions._Undefined()
def test_defaultLocation(self):
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder(location=parameters.ParamLocation.AVERAGE) as pb:
pb.defParam("p1", "units", "p1 description")
pb.defParam(
"p2", "units", "p2 description", parameters.ParamLocation.TOP
)
pc = MockPC()
self.assertEqual(pc.paramDefs["p1"].location, parameters.ParamLocation.AVERAGE)
self.assertEqual(pc.paramDefs["p2"].location, parameters.ParamLocation.TOP)
def test_categories(self):
class MockPC0(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("p0", "units", "p0 description", "location")
pc = MockPC0()
self.assertEqual(pc.paramDefs.categories, set())
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder(categories=["awesome", "stuff"]) as pb:
pb.defParam("p1", "units", "p1 description", "location")
pb.defParam(
"p2", "units", "p2 description", "location", categories=["bacon"]
)
with pDefs.createBuilder() as pb:
pb.defParam(
"p3", "units", "p3 description", "location", categories=["bacon"]
)
pc = MockPC()
self.assertEqual(pc.paramDefs.categories, set(["awesome", "stuff", "bacon"]))
p1 = pc.paramDefs["p1"]
p2 = pc.paramDefs["p2"]
p3 = pc.paramDefs["p3"]
self.assertEqual(p1.categories, set(["awesome", "stuff"]))
self.assertEqual(p2.categories, set(["awesome", "stuff", "bacon"]))
self.assertEqual(p3.categories, set(["bacon"]))
self.assertEqual(set(pc.paramDefs.inCategory("awesome")), set([p1, p2]))
self.assertEqual(set(pc.paramDefs.inCategory("stuff")), set([p1, p2]))
self.assertEqual(set(pc.paramDefs.inCategory("bacon")), set([p2, p3]))
def test_parameterCollectionsHave__slots__(self):
"""Make sure something is implemented to prevent accidental creation of attributes"""
self.assertEqual(
set(["_hist", "_backup", "assigned", "_p_serialNum", "serialNum"]),
set(parameters.ParameterCollection._slots),
)
class MockPC(parameters.ParameterCollection):
pass
pc = MockPC()
# No longer protecting against __dict__ access. If someone REALLY wants to
# staple something to a parameter collection with no guarantees of anything,
# that's on them
# with self.assertRaises(AttributeError):
# pc.__dict__["foo"] = 5
with self.assertRaises(AssertionError):
pc.whatever = 22
# try again after using a ParameterBuilder
class MockPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
# use of the ParameterBuilder creates an empty __slots__
with pDefs.createBuilder() as pb:
pb.defParam("p0", "units", "p0 description", "location")
pc = MockPC()
self.assertTrue("_p_p0" in MockPC._slots)
# Make sure we aren't making any weird copies of anything
self.assertTrue(pc._slots is MockPC._slots)
with self.assertRaises(AssertionError):
pc.whatever = 33
self.assertEqual(["serialNum"], pc.keys())
pc.p0 = "hi"
self.assertEqual({"p0", "serialNum"}, set(pc.keys()))
# Also make sure that subclasses of ParameterCollection subclasses use __slots__
class MockPCChild(MockPC):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam("p2", "foo", "bar")
pcc = MockPCChild()
with self.assertRaises(AssertionError):
pcc.whatever = 33
class MockSyncPC(parameters.ParameterCollection):
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder(
default=0.0, location=parameters.ParamLocation.AVERAGE
) as pb:
pb.defParam("param1", "units", "p1 description", categories=["cat1"])
pb.defParam("param2", "units", "p2 description", categories=["cat2"])
pb.defParam("param3", "units", "p3 description", categories=["cat3"])
def makeComp(name):
c = composites.Composite(name)
c.p = MockSyncPC()
return c
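# Helper for the MPI tests below: builds a bare Composite carrying a MockSyncPC parameter
# collection so that parameter synchronization across ranks can be exercised.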
class SynchronizationTests:
"""Some unit tests that must be run with mpirun instead of the standard unittest system."""
def setUp(self):
self.r = makeComp("reactor")
self.r.core = makeComp("core")
self.r.add(self.r.core)
for ai in range(armi.MPI_SIZE * 4):
a = makeComp("assembly{}".format(ai))
self.r.core.add(a)
for bi in range(10):
a.add(makeComp("block{}-{}".format(ai, bi)))
self.comps = [self.r.core] + self.r.core.getChildren(deep=True)
for pd in MockSyncPC().paramDefs:
pd.assigned = parameters.NEVER
def tearDown(self):
del self.r
def run(self, testNamePrefix="mpitest_"):
with open("mpitest{}.temp".format(armi.MPI_RANK), "w") as self.l:
for methodName in sorted(dir(self)):
if methodName.startswith(testNamePrefix):
self.write("{}.{}".format(self.__class__.__name__, methodName))
try:
self.setUp()
getattr(self, methodName)()
except Exception:
self.write("failed, big time")
traceback.print_exc(file=self.l)
self.write("*** printed exception")
try:
self.tearDown()
except:
pass
self.l.write("done.")
def write(self, msg):
self.l.write("{}\n".format(msg))
self.l.flush()
def assertRaises(self, exceptionType):
class ExceptionCatcher:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is exceptionType:
return True
raise AssertionError(
"Expected {}, but got {}".format(exceptionType, exc_type)
)
return ExceptionCatcher()
def assertEqual(self, expected, actual):
if expected != actual:
raise AssertionError(
"(expected) {} != {} (actual)".format(expected, actual)
)
def assertNotEqual(self, expected, actual):
if expected == actual:
raise AssertionError(
"(expected) {} == {} (actual)".format(expected, actual)
)
def mpitest_noConflicts(self):
for ci, comp in enumerate(self.comps):
if ci % armi.MPI_SIZE == armi.MPI_RANK:
comp.p.param1 = (armi.MPI_RANK + 1) * 30.0
else:
self.assertNotEqual((armi.MPI_RANK + 1) * 30.0, comp.p.param1)
# numUpdates = len(self.comps) // armi.MPI_SIZE + (len(self.comps) % armi.MPI_SIZE > armi.MPI_RANK)
self.assertEqual(len(self.comps), self.r.syncMpiState())
for ci, comp in enumerate(self.comps):
self.assertEqual((ci % armi.MPI_SIZE + 1) * 30.0, comp.p.param1)
def mpitest_noConflicts_setByString(self):
"""Make sure params set by string also work with sync."""
for ci, comp in enumerate(self.comps):
if ci % armi.MPI_SIZE == armi.MPI_RANK:
comp.p.param2 = (armi.MPI_RANK + 1) * 30.0
else:
self.assertNotEqual((armi.MPI_RANK + 1) * 30.0, comp.p.param2)
# numUpdates = len(self.comps) // armi.MPI_SIZE + (len(self.comps) % armi.MPI_SIZE > armi.MPI_RANK)
self.assertEqual(len(self.comps), self.r.syncMpiState())
for ci, comp in enumerate(self.comps):
self.assertEqual((ci % armi.MPI_SIZE + 1) * 30.0, comp.p.param2)
def mpitest_withConflicts(self):
self.r.core.p.param1 = (armi.MPI_RANK + 1) * 99.0
with self.assertRaises(ValueError):
self.r.syncMpiState()
def mpitest_withConflictsButSameValue(self):
self.r.core.p.param1 = (armi.MPI_SIZE + 1) * 99.0
self.r.syncMpiState()
self.assertEqual((armi.MPI_SIZE + 1) * 99.0, self.r.core.p.param1)
def mpitest_noConflictsMaintainWithStateRetainer(self):
assigned = []
with self.r.retainState(parameters.inCategory("cat1")):
for ci, comp in enumerate(self.comps):
comp.p.param2 = 99 * ci
if ci % armi.MPI_SIZE == armi.MPI_RANK:
comp.p.param1 = (armi.MPI_RANK + 1) * 30.0
assigned.append(parameters.SINCE_ANYTHING)
else:
self.assertNotEqual((armi.MPI_RANK + 1) * 30.0, comp.p.param1)
assigned.append(parameters.NEVER)
# 1st inside state retainer
self.assertEqual(
True, all(c.p.assigned == parameters.SINCE_ANYTHING for c in self.comps)
)
# confirm outside state retainer
self.assertEqual(assigned, [c.p.assigned for ci, c in enumerate(self.comps)])
# this rank's "assigned" components are not assigned on the workers, and so will be updated
self.assertEqual(len(self.comps), self.r.syncMpiState())
for ci, comp in enumerate(self.comps):
self.assertEqual((ci % armi.MPI_SIZE + 1) * 30.0, comp.p.param1)
def mpitest_conflictsMaintainWithStateRetainer(self):
with self.r.retainState(parameters.inCategory("cat2")):
for _, comp in enumerate(self.comps):
comp.p.param2 = 99 * armi.MPI_RANK
with self.assertRaises(ValueError):
self.r.syncMpiState()
def mpitest_rxCoeffsProcess(self):
"""This test mimics the process for rxCoeffs when doing distributed doppler"""
def do():
# we will do this over 4 passes (there are 4 * MPI_SIZE assemblies)
for passNum in range(4):
with self.r.retainState(parameters.inCategory("cat2")):
self.r.p.param3 = "hi"
for c in self.comps:
c.p.param1 = (
99 * armi.MPI_RANK
) # this will get reset after state retainer
a = self.r.core[passNum * armi.MPI_SIZE + armi.MPI_RANK]
a.p.param2 = armi.MPI_RANK * 20.0
for b in a:
b.p.param2 = armi.MPI_RANK * 10.0
for ai, a2 in enumerate(self.r):
if ai % armi.MPI_SIZE != armi.MPI_RANK:
assert "param2" not in a2.p
self.assertEqual(parameters.SINCE_ANYTHING, param1.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, param2.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, param3.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, a.p.assigned)
self.r.syncMpiState()
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
param1.assigned,
)
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
param2.assigned,
)
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
param3.assigned,
)
self.assertEqual(
parameters.SINCE_ANYTHING
& ~parameters.SINCE_LAST_DISTRIBUTE_STATE,
a.p.assigned,
)
self.assertEqual(parameters.NEVER, param1.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, param2.assigned)
self.assertEqual(parameters.NEVER, param3.assigned)
self.assertEqual(parameters.SINCE_ANYTHING, a.p.assigned)
do_assert(passNum)
param1 = self.r.p.paramDefs["param1"]
param2 = self.r.p.paramDefs["param2"]
param3 = self.r.p.paramDefs["param3"]
def do_assert(passNum):
# ensure all assemblies and blocks set values for param2, but param1 is empty
for rank in range(armi.MPI_SIZE):
a = self.r.core[passNum * armi.MPI_SIZE + rank]
assert "param1" not in a.p
assert "param3" not in a.p
self.assertEqual(rank * 20, a.p.param2)
for b in a:
self.assertEqual(rank * 10, b.p.param2)
assert "param1" not in b.p
assert "param3" not in b.p
if armi.MPI_RANK == 0:
with self.r.retainState(parameters.inCategory("cat2")):
armi.MPI_COMM.bcast(self.r)
do()
[do_assert(passNum) for passNum in range(4)]
[do_assert(passNum) for passNum in range(4)]
else:
del self.r
self.r = armi.MPI_COMM.bcast(None)
do()
if __name__ == "__main__":
if armi.MPI_SIZE == 1:
unittest.main()
else:
SynchronizationTests().run()
|
[
"unittest.main",
"traceback.print_exc",
"armi.MPI_COMM.bcast",
"armi.reactor.parameters.ParameterDefinitionCollection",
"armi.reactor.parameters.NoDefault",
"armi.reactor.composites.Composite",
"armi.reactor.parameters.parameterDefinitions._Undefined",
"armi.reactor.parameters.inCategory"
] |
[((14349, 14391), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (14389, 14391), False, 'from armi.reactor import parameters\n'), ((14762, 14788), 'armi.reactor.composites.Composite', 'composites.Composite', (['name'], {}), '(name)\n', (14782, 14788), False, 'from armi.reactor import composites\n'), ((24031, 24046), 'unittest.main', 'unittest.main', ([], {}), '()\n', (24044, 24046), False, 'import unittest\n'), ((1554, 1596), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (1594, 1596), False, 'from armi.reactor import parameters\n'), ((2253, 2295), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (2293, 2295), False, 'from armi.reactor import parameters\n'), ((3576, 3618), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (3616, 3618), False, 'from armi.reactor import parameters\n'), ((4199, 4241), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (4239, 4241), False, 'from armi.reactor import parameters\n'), ((4613, 4655), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (4653, 4655), False, 'from armi.reactor import parameters\n'), ((5314, 5356), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (5354, 5356), False, 'from armi.reactor import parameters\n'), ((7141, 7183), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (7181, 7183), False, 'from armi.reactor import parameters\n'), ((7532, 7574), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (7572, 7574), False, 'from armi.reactor import parameters\n'), ((7856, 7898), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (7896, 7898), False, 'from armi.reactor import parameters\n'), ((8658, 8700), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (8698, 8700), False, 'from armi.reactor import parameters\n'), ((9345, 9387), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (9385, 9387), False, 'from armi.reactor import parameters\n'), ((9657, 9699), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (9697, 9699), False, 'from armi.reactor import parameters\n'), ((9968, 9990), 'armi.reactor.parameters.NoDefault', 'parameters.NoDefault', ([], {}), '()\n', (9988, 9990), False, 'from armi.reactor import parameters\n'), ((10114, 10158), 'armi.reactor.parameters.parameterDefinitions._Undefined', 'parameters.parameterDefinitions._Undefined', ([], {}), '()\n', (10156, 10158), False, 'from armi.reactor import parameters\n'), ((10270, 10312), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (10310, 10312), False, 'from armi.reactor import parameters\n'), ((10892, 10934), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', 
(10932, 10934), False, 'from armi.reactor import parameters\n'), ((11210, 11252), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (11250, 11252), False, 'from armi.reactor import parameters\n'), ((13261, 13303), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (13301, 13303), False, 'from armi.reactor import parameters\n'), ((14041, 14083), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (14081, 14083), False, 'from armi.reactor import parameters\n'), ((23924, 23949), 'armi.MPI_COMM.bcast', 'armi.MPI_COMM.bcast', (['None'], {}), '(None)\n', (23943, 23949), False, 'import armi\n'), ((6709, 6751), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (6749, 6751), False, 'from armi.reactor import parameters\n'), ((8965, 9007), 'armi.reactor.parameters.ParameterDefinitionCollection', 'parameters.ParameterDefinitionCollection', ([], {}), '()\n', (9005, 9007), False, 'from armi.reactor import parameters\n'), ((18999, 19028), 'armi.reactor.parameters.inCategory', 'parameters.inCategory', (['"""cat1"""'], {}), "('cat1')\n", (19020, 19028), False, 'from armi.reactor import parameters\n'), ((20147, 20176), 'armi.reactor.parameters.inCategory', 'parameters.inCategory', (['"""cat2"""'], {}), "('cat2')\n", (20168, 20176), False, 'from armi.reactor import parameters\n'), ((23699, 23726), 'armi.MPI_COMM.bcast', 'armi.MPI_COMM.bcast', (['self.r'], {}), '(self.r)\n', (23718, 23726), False, 'import armi\n'), ((23651, 23680), 'armi.reactor.parameters.inCategory', 'parameters.inCategory', (['"""cat2"""'], {}), "('cat2')\n", (23672, 23680), False, 'from armi.reactor import parameters\n'), ((20662, 20691), 'armi.reactor.parameters.inCategory', 'parameters.inCategory', (['"""cat2"""'], {}), "('cat2')\n", (20683, 20691), False, 'from armi.reactor import parameters\n'), ((16044, 16076), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'self.l'}), '(file=self.l)\n', (16063, 16076), False, 'import traceback\n')]
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from juon import Graph
from juon.graphs import walk
from data.graph_data import build_graph, graph_is_as_expected
def test_traversal():
graph = build_graph()
graph_is_as_expected(graph)
d_1 = walk(graph, "Lainie")
    # test that the traversal starts from the right point
assert sorted(d_1.list_relationships()) == ["Likes", "Lives In", "Mother"]
assert d_1.active_nodes() == {"Lainie"}
# test follow
d_2 = d_1.follow("Mother")
assert sorted(d_2.list_relationships()) == [
"Daughter",
"Likes",
"Lives In",
"Sister",
]
assert sorted(d_2.active_nodes()) == ["Ceanne", "Sharlene"]
assert d_2.values("node_type") == ["Person"]
# test filtering
d_3 = d_1.follow("Likes", "Lives In", "Mother")
assert sorted(d_3.has("node_type", "Person").active_nodes()) == [
"Ceanne",
"Sharlene",
]
assert d_3.has("node_type", "Locality").active_nodes() == {"Toodyay"}
assert d_3.select(lambda r: r["node_type"] == "Restaurant").active_nodes() == {
"<NAME>"
}
if __name__ == "__main__":
test_traversal()
print("okay")
|
[
"juon.graphs.walk",
"data.graph_data.graph_is_as_expected",
"os.path.join",
"data.graph_data.build_graph"
] |
[((41, 72), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (53, 72), False, 'import os\n'), ((225, 238), 'data.graph_data.build_graph', 'build_graph', ([], {}), '()\n', (236, 238), False, 'from data.graph_data import build_graph, graph_is_as_expected\n'), ((243, 270), 'data.graph_data.graph_is_as_expected', 'graph_is_as_expected', (['graph'], {}), '(graph)\n', (263, 270), False, 'from data.graph_data import build_graph, graph_is_as_expected\n'), ((282, 303), 'juon.graphs.walk', 'walk', (['graph', '"""Lainie"""'], {}), "(graph, 'Lainie')\n", (286, 303), False, 'from juon.graphs import walk\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from newton_raphson import *
from random import uniform
import numpy.polynomial.legendre as L
###question 0
def E(X):
    ## Return the electrostatic energy E(x1,...,xn): endpoint terms log|xi+1| + log|xi-1| plus pairwise terms log|xi-xj|
E=0
N=len(X)
for i in range(N):
E+=np.log(abs(X[i]+1))+np.log(abs(X[i]-1))
for j in range(N):
if j!=i:
if X[i]!=X[j]:
E+=np.log(abs(X[i]-X[j]))
else:
                return -7 ## Sentinel value: avoids evaluating log(0) when two charges coincide
return E
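# Hedged sanity check for E, added for illustration only; the helper name is an
# assumption and nothing in this module calls it.
def _check_energy_two_charges():
    # For charges at -0.5 and 0.5, each endpoint pair contributes log(0.5) + log(1.5),
    # and the interaction term log|x0 - x1| = log(1) = 0 (counted twice by the
    # double loop), so E = 2 * log(0.75).
    return abs(E([-0.5, 0.5]) - 2 * np.log(0.75)) < 1e-12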
def grad_E(X):
    ## Return the gradient vector of E, as described in the assignment
n, m = np.shape(X)
assert (m == 1)
res = np.zeros([n,1])
for i in range(n):
s = 0
for j in range(n):
if (i != j):
s += 1. / (X[i, 0] - X[j, 0])
res[i, 0] = 1. / (1 + X[i]) + 1. / (X[i] - 1) + s
return res
def F(X):
return grad_E(X)
###quest1
def Jacobian_E(Y):
    ## Return the Jacobian matrix of grad_E
N = np.shape(Y)[0]
J=np.zeros((N,N))
for j in range(N):
for i in range(N):
if i==j:
J[j][i]=-1/(Y[i]+1)**2 - 1/(Y[i]-1)**2
for k in range(N):
if k!=j:
J[j][i]+=-1/(Y[i]-Y[k])**2
else:
J[i][j]=1/(Y[i]-Y[j])**2
return J
def J(X):
return Jacobian_E(X)
## Two global lists used for the display of the curve ||F(X)||
norme_F=[]
iterations=[]
def Newton_Raphson_curve(f, J, U0, N, eps):
## The Newton-Raphson algorithm modified to allow the display of the curve ||F(X)||
global norme_F
global iterations
norme_F=[]
iterations=[]
"""
Solve nonlinear system F=0 by Newton-Raphson's method.
J is the Jacobian of F. At input, U0 is the starting
position of the algorithm. The iteration continues
until ||F|| < eps or until N iterations are reached.
"""
F_value = f(U0)
U = U0
F_norm = np.linalg.norm(F_value, ord=2)
iteration_counter = 0
norme_F.append(F_norm)
iterations.append(iteration_counter)
while abs(F_norm) > eps and iteration_counter < N:
V = np.linalg.solve(J(U), -F_value)
U = U + V
F_value = f(U)
F_norm = np.linalg.norm(F_value, ord=2)
iteration_counter += 1
norme_F.append(F_norm)
iterations.append(iteration_counter)
# Here, either a solution is found, or too many iterations
if abs(F_norm) > eps:
iteration_counter = -1
return U, iteration_counter
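# Hedged sketch: a small self-contained check of Newton_Raphson_curve on a toy
# 2x2 system; _toy_f, _toy_J and the check helper are illustrative names and
# nothing below is executed at import time.
def _toy_f(U):
    # f(x, y) = (x^2 + y^2 - 1, x - y); the positive root is (sqrt(0.5), sqrt(0.5))
    x, y = U[0, 0], U[1, 0]
    return np.array([[x ** 2 + y ** 2 - 1.0], [x - y]])
def _toy_J(U):
    x, y = U[0, 0], U[1, 0]
    return np.array([[2.0 * x, 2.0 * y], [1.0, -1.0]])
def _check_newton_on_circle():
    U_root, n_iter = Newton_Raphson_curve(_toy_f, _toy_J, np.array([[1.0], [0.0]]), N=50, eps=1e-10)
    return n_iter != -1 and np.allclose(U_root, np.sqrt(0.5))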
def Newton_Raphson_backtracking_curve(f, J, U0, N, eps, alpha):
## The Newton-Raphson algorithm with backtracking modified to allow the display of the curve ||F(X)||
global norme_F
global iterations
norme_F=[]
iterations=[]
"""
Solve nonlinear system F=0 by Newton-Raphson's method.
J is the Jacobian of F. At input, U0 is the starting
position of the algorithm. The iteration continues
until ||F|| < eps or until N iterations are reached.
There is a backtracking to reach the solution faster.
"""
F_value, U = f(U0), U0
F_norm = np.linalg.norm(F_value, ord=2)
iteration_counter = 0
while abs(F_norm) > eps and iteration_counter < N:
V = np.linalg.lstsq(J(U), -F_value, rcond=None)[0]
nxt_F_norm = np.linalg.norm(f(U + V), ord=2)
i = 0
while nxt_F_norm >= F_norm :
print('BACKTRACKING')
i += 1
nxt_F_norm = np.linalg.norm(f(U + alpha ** i * V), ord=2)
U = U + alpha ** i * V
F_value = f(U)
F_norm = np.linalg.norm(F_value, ord=2)
iteration_counter += 1
norme_F.append(F_norm)
iterations.append(iteration_counter)
# Here, either a solution is found, or too many iterations
if abs(F_norm) > eps:
iteration_counter = -1
return U, iteration_counter
## Initialisation of Ten chosen charges
U0=np.zeros((10,1))
U0[0]=-0.86684278
U0[1]=0.86088026
U0[2]=0.80889216
U0[3]=-0.98098176
U0[4]=0.68707341
U0[5]=0.27329905
U0[6]=-0.07208807
U0[7]=0.6864963
U0[8]=-0.11970087
U0[9]=-0.1899953
# We could have imported the function from newton_raphson.py, but this version allows drawing the curve ||F(X)||
def Newton_Raphson_with_backtracking(f, J, U0, N, epsilon):
global norme_F
global iterations
norme_F=[]
iterations=[]
F_value, U = f(U0), U0
F_norm = np.linalg.norm(F_value, ord=2)
iteration_counter = 0
for i in range(N):
fu = f(U)
na = np.linalg.norm(fu)
if (na < epsilon):
return U
ju = J(U)
V = np.linalg.lstsq(ju,-fu)[0]
if (np.linalg.norm(f(U+V)) - na >= 0):
V = (1.0/3.0)*V
U = U + V
F_value = f(U)
F_norm = np.linalg.norm(F_value, ord=2)
iteration_counter += 1
norme_F.append(F_norm)
iterations.append(iteration_counter)
return U, iteration_counter
U,iteration_counter=Newton_Raphson_curve(F, J, U0, N=100, eps=1e-8)
def test_equilibrium():
    # A function that shows the electrostatic equilibrium of 10 charges, with and without backtracking
U,iteration_counter=Newton_Raphson_curve(F, J, U0, N=100, eps=1e-8)
print("Test with 10 charges, initialisation of U0 : ",U0.transpose())
print("Final positions of the charges : ",U.transpose())
plt.plot(iterations,norme_F,label="without backtracking")
plt.title("Electrostatic equilibrium with 10 charges")
Newton_Raphson_with_backtracking(F, J, U0, N=100, epsilon=1e-8)
#plt.plot(iterations,norme_F,label="using backtracking")
plt.xlabel("Number of iterations")
plt.ylabel("||F(X)||")
plt.title("Electrostatic equilibrium with 10 charges with and without backtracking")
plt.semilogy()
plt.legend()
plt.show()
def position_real_axis():
    # A function that shows the final position of the charges on the real axis
plt.figure(num=2,figsize=(7,1.5))
print("Final positions on the real axis")
plt.title("Position of the charges")
plt.xlabel("x axis")
plt.plot([min(U),max(U)],[0,0],color="red",label="Real axis")
plt.plot(U,[0]*len(U),'o',color="yellow",label="Charge")
plt.legend()
plt.show()
def energy_one_charge_position():
## The plot of the curve for one charge
size=50
O=np.linspace(-0.99,0.99,size)
V=np.zeros((size,1))
for k in range(size):
V[k]=E([O[k]])
print("Graph describing the evolution the electrostatic energy of one charge")
plt.title("Electrostatic energy of one charge")
plt.ylabel("Energy")
plt.xlabel("Position of the charge")
plt.plot(O,V)
plt.show()
def mirror(A):
n = A.size
for i in range(n//2):
tmp = A[i]
A[i] = A[n-i-1]
A[n-i-1] = tmp
return A
#Plot Legendre polynomials and equilibrium positions
def add_plot(X, lbl, clr, type='o'):
Peq = Newton_Raphson(grad_E, Jacobian_E, X, 100, 1e-8)
n= Peq.size
for i in range(n):
plt.plot(Peq[i,0],0,type, color=clr)
c = [0]*(n+2)
c[n+1] = 1
d = L.legder(c)
P = L.leg2poly(d)
P = mirror(P)
Poly = np.poly1d(P)
x = np.linspace(-1,1,100)
y = Poly(x)
plt.plot(x, y, label=lbl, color=clr)
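# Hedged cross-check of the electrostatic interpretation plotted above: the n
# equilibrium positions should match the zeros of d/dx P_{n+1} (Stieltjes'
# classical result, stated here as an assumption). The helper is illustrative,
# never called, and assumes Newton_Raphson accepts a plain (n, 1) ndarray.
def _check_against_legendre_zeros(n, tol=1e-4):
    X0 = np.linspace(-0.9, 0.9, n).reshape(n, 1)
    Peq = Newton_Raphson(grad_E, Jacobian_E, X0, 100, 1e-8)
    c = [0] * (n + 2)
    c[n + 1] = 1  # coefficients of P_{n+1} in the Legendre basis
    zeros = np.sort(L.legroots(L.legder(c)))
    return np.allclose(np.sort(np.asarray(Peq).flatten()), zeros, atol=tol)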
#Electrostatic Equilibrium Test
def elec_equ_test():
A = np.matrix([[0.2]])
B = np.matrix([[0.5],
[0.6]])
C = np.matrix([[0.4],
[-0.5],
[0.7]])
D = np.matrix([[0.4],
[-0.4],
[0.5],
[0.6]])
add_plot(A, 'n=1', 'r')
add_plot(B, 'n=2', 'y')
add_plot(C, 'n=3', 'b')
add_plot(D, 'n=4', 'g')
plt.plot([-1,1], [0,0], 'k')
plt.axis([-1,1,-4,4])
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.title("Legendre polynomials and equilibrium positions")
plt.show()
if __name__ == '__main__':
test_equilibrium()
position_real_axis()
energy_one_charge_position()
elec_equ_test()
|
[
"matplotlib.pyplot.title",
"numpy.poly1d",
"numpy.matrix",
"matplotlib.pyplot.show",
"numpy.linalg.lstsq",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.polynomial.legendre.leg2poly",
"numpy.linalg.norm",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel",
"numpy.polynomial.legendre.legder"
] |
[((3963, 3980), 'numpy.zeros', 'np.zeros', (['(10, 1)'], {}), '((10, 1))\n', (3971, 3980), True, 'import numpy as np\n'), ((617, 628), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (625, 628), True, 'import numpy as np\n'), ((659, 675), 'numpy.zeros', 'np.zeros', (['[n, 1]'], {}), '([n, 1])\n', (667, 675), True, 'import numpy as np\n'), ((1021, 1037), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (1029, 1037), True, 'import numpy as np\n'), ((1957, 1987), 'numpy.linalg.norm', 'np.linalg.norm', (['F_value'], {'ord': '(2)'}), '(F_value, ord=2)\n', (1971, 1987), True, 'import numpy as np\n'), ((3129, 3159), 'numpy.linalg.norm', 'np.linalg.norm', (['F_value'], {'ord': '(2)'}), '(F_value, ord=2)\n', (3143, 3159), True, 'import numpy as np\n'), ((4434, 4464), 'numpy.linalg.norm', 'np.linalg.norm', (['F_value'], {'ord': '(2)'}), '(F_value, ord=2)\n', (4448, 4464), True, 'import numpy as np\n'), ((5382, 5441), 'matplotlib.pyplot.plot', 'plt.plot', (['iterations', 'norme_F'], {'label': '"""without backtracking"""'}), "(iterations, norme_F, label='without backtracking')\n", (5390, 5441), True, 'import matplotlib.pyplot as plt\n'), ((5444, 5498), 'matplotlib.pyplot.title', 'plt.title', (['"""Electrostatic equilibrium with 10 charges"""'], {}), "('Electrostatic equilibrium with 10 charges')\n", (5453, 5498), True, 'import matplotlib.pyplot as plt\n'), ((5632, 5666), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (5642, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5671, 5693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""||F(X)||"""'], {}), "('||F(X)||')\n", (5681, 5693), True, 'import matplotlib.pyplot as plt\n'), ((5698, 5787), 'matplotlib.pyplot.title', 'plt.title', (['"""Electrostatic equilibrium with 10 charges with and without backtracking"""'], {}), "(\n 'Electrostatic equilibrium with 10 charges with and without backtracking')\n", (5707, 5787), True, 'import matplotlib.pyplot as plt\n'), ((5787, 5801), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {}), '()\n', (5799, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5806, 5818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5816, 5818), True, 'import matplotlib.pyplot as plt\n'), ((5823, 5833), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5831, 5833), True, 'import matplotlib.pyplot as plt\n'), ((5942, 5977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(2)', 'figsize': '(7, 1.5)'}), '(num=2, figsize=(7, 1.5))\n', (5952, 5977), True, 'import matplotlib.pyplot as plt\n'), ((6026, 6062), 'matplotlib.pyplot.title', 'plt.title', (['"""Position of the charges"""'], {}), "('Position of the charges')\n", (6035, 6062), True, 'import matplotlib.pyplot as plt\n'), ((6067, 6087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x axis"""'], {}), "('x axis')\n", (6077, 6087), True, 'import matplotlib.pyplot as plt\n'), ((6219, 6231), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6229, 6231), True, 'import matplotlib.pyplot as plt\n'), ((6236, 6246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6244, 6246), True, 'import matplotlib.pyplot as plt\n'), ((6345, 6375), 'numpy.linspace', 'np.linspace', (['(-0.99)', '(0.99)', 'size'], {}), '(-0.99, 0.99, size)\n', (6356, 6375), True, 'import numpy as np\n'), ((6380, 6399), 'numpy.zeros', 'np.zeros', (['(size, 1)'], {}), '((size, 1))\n', (6388, 6399), True, 'import numpy as np\n'), ((6535, 6582), 'matplotlib.pyplot.title', 'plt.title', (['"""Electrostatic 
energy of one charge"""'], {}), "('Electrostatic energy of one charge')\n", (6544, 6582), True, 'import matplotlib.pyplot as plt\n'), ((6587, 6607), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy"""'], {}), "('Energy')\n", (6597, 6607), True, 'import matplotlib.pyplot as plt\n'), ((6612, 6648), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position of the charge"""'], {}), "('Position of the charge')\n", (6622, 6648), True, 'import matplotlib.pyplot as plt\n'), ((6653, 6667), 'matplotlib.pyplot.plot', 'plt.plot', (['O', 'V'], {}), '(O, V)\n', (6661, 6667), True, 'import matplotlib.pyplot as plt\n'), ((6671, 6681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6679, 6681), True, 'import matplotlib.pyplot as plt\n'), ((7105, 7116), 'numpy.polynomial.legendre.legder', 'L.legder', (['c'], {}), '(c)\n', (7113, 7116), True, 'import numpy.polynomial.legendre as L\n'), ((7125, 7138), 'numpy.polynomial.legendre.leg2poly', 'L.leg2poly', (['d'], {}), '(d)\n', (7135, 7138), True, 'import numpy.polynomial.legendre as L\n'), ((7169, 7181), 'numpy.poly1d', 'np.poly1d', (['P'], {}), '(P)\n', (7178, 7181), True, 'import numpy as np\n'), ((7190, 7213), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (7201, 7213), True, 'import numpy as np\n'), ((7232, 7268), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'lbl', 'color': 'clr'}), '(x, y, label=lbl, color=clr)\n', (7240, 7268), True, 'import matplotlib.pyplot as plt\n'), ((7334, 7352), 'numpy.matrix', 'np.matrix', (['[[0.2]]'], {}), '([[0.2]])\n', (7343, 7352), True, 'import numpy as np\n'), ((7362, 7387), 'numpy.matrix', 'np.matrix', (['[[0.5], [0.6]]'], {}), '([[0.5], [0.6]])\n', (7371, 7387), True, 'import numpy as np\n'), ((7416, 7449), 'numpy.matrix', 'np.matrix', (['[[0.4], [-0.5], [0.7]]'], {}), '([[0.4], [-0.5], [0.7]])\n', (7425, 7449), True, 'import numpy as np\n'), ((7497, 7537), 'numpy.matrix', 'np.matrix', (['[[0.4], [-0.4], [0.5], [0.6]]'], {}), '([[0.4], [-0.4], [0.5], [0.6]])\n', (7506, 7537), True, 'import numpy as np\n'), ((7711, 7741), 'matplotlib.pyplot.plot', 'plt.plot', (['[-1, 1]', '[0, 0]', '"""k"""'], {}), "([-1, 1], [0, 0], 'k')\n", (7719, 7741), True, 'import matplotlib.pyplot as plt\n'), ((7744, 7768), 'matplotlib.pyplot.axis', 'plt.axis', (['[-1, 1, -4, 4]'], {}), '([-1, 1, -4, 4])\n', (7752, 7768), True, 'import matplotlib.pyplot as plt\n'), ((7770, 7785), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (7780, 7785), True, 'import matplotlib.pyplot as plt\n'), ((7790, 7805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (7800, 7805), True, 'import matplotlib.pyplot as plt\n'), ((7810, 7822), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7820, 7822), True, 'import matplotlib.pyplot as plt\n'), ((7827, 7886), 'matplotlib.pyplot.title', 'plt.title', (['"""Legendre polynomials and equilibrium positions"""'], {}), "('Legendre polynomials and equilibrium positions')\n", (7836, 7886), True, 'import matplotlib.pyplot as plt\n'), ((7891, 7901), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7899, 7901), True, 'import matplotlib.pyplot as plt\n'), ((1000, 1011), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (1008, 1011), True, 'import numpy as np\n'), ((2239, 2269), 'numpy.linalg.norm', 'np.linalg.norm', (['F_value'], {'ord': '(2)'}), '(F_value, ord=2)\n', (2253, 2269), True, 'import numpy as np\n'), ((3616, 3646), 'numpy.linalg.norm', 'np.linalg.norm', (['F_value'], {'ord': '(2)'}), '(F_value, 
ord=2)\n', (3630, 3646), True, 'import numpy as np\n'), ((4545, 4563), 'numpy.linalg.norm', 'np.linalg.norm', (['fu'], {}), '(fu)\n', (4559, 4563), True, 'import numpy as np\n'), ((4802, 4832), 'numpy.linalg.norm', 'np.linalg.norm', (['F_value'], {'ord': '(2)'}), '(F_value, ord=2)\n', (4816, 4832), True, 'import numpy as np\n'), ((7017, 7056), 'matplotlib.pyplot.plot', 'plt.plot', (['Peq[i, 0]', '(0)', 'type'], {'color': 'clr'}), '(Peq[i, 0], 0, type, color=clr)\n', (7025, 7056), True, 'import matplotlib.pyplot as plt\n'), ((4642, 4666), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['ju', '(-fu)'], {}), '(ju, -fu)\n', (4657, 4666), True, 'import numpy as np\n')]
|
import os
import re
import shutil
import struct
from zlib import crc32
from collections import defaultdict
from .parser import SourceBuilder, TLParser, TLObject
AUTO_GEN_NOTICE = \
'"""File generated by TLObjects\' generator. All changes will be ERASED"""'
AUTO_CASTS = {
'InputPeer': 'utils.get_input_peer(client.get_input_entity({}))',
'InputChannel': 'utils.get_input_channel(client.get_input_entity({}))',
'InputUser': 'utils.get_input_user(client.get_input_entity({}))',
'InputMedia': 'utils.get_input_media({})',
'InputPhoto': 'utils.get_input_photo({})'
}
class TLGenerator:
def __init__(self, output_dir):
self.output_dir = output_dir
def _get_file(self, *paths):
return os.path.join(self.output_dir, *paths)
def _rm_if_exists(self, filename):
file = self._get_file(filename)
if os.path.exists(file):
if os.path.isdir(file):
shutil.rmtree(file)
else:
os.remove(file)
def tlobjects_exist(self):
"""Determines whether the TLObjects were previously
generated (hence exist) or not
"""
return os.path.isfile(self._get_file('all_tlobjects.py'))
def clean_tlobjects(self):
"""Cleans the automatically generated TLObjects from disk"""
for name in ('functions', 'types', 'all_tlobjects.py'):
self._rm_if_exists(name)
def generate_tlobjects(self, scheme_file, import_depth):
"""Generates all the TLObjects from scheme.tl to
tl/functions and tl/types
"""
# First ensure that the required parent directories exist
os.makedirs(self._get_file('functions'), exist_ok=True)
os.makedirs(self._get_file('types'), exist_ok=True)
# Step 0: Cache the parsed file on a tuple
tlobjects = tuple(TLParser.parse_file(scheme_file, ignore_core=True))
# Step 1: Group everything by {namespace: [tlobjects]} so we can
# easily generate __init__.py files with all the TLObjects on them.
namespace_functions = defaultdict(list)
namespace_types = defaultdict(list)
# Make use of this iteration to also store 'Type: [Constructors]',
# used when generating the documentation for the classes.
type_constructors = defaultdict(list)
for tlobject in tlobjects:
if tlobject.is_function:
namespace_functions[tlobject.namespace].append(tlobject)
else:
namespace_types[tlobject.namespace].append(tlobject)
type_constructors[tlobject.result].append(tlobject)
# Step 2: Generate the actual code
self._write_init_py(
self._get_file('functions'), import_depth,
namespace_functions, type_constructors
)
self._write_init_py(
self._get_file('types'), import_depth,
namespace_types, type_constructors
)
# Step 4: Once all the objects have been generated,
# we can now group them in a single file
filename = os.path.join(self._get_file('all_tlobjects.py'))
with open(filename, 'w', encoding='utf-8') as file:
with SourceBuilder(file) as builder:
builder.writeln(AUTO_GEN_NOTICE)
builder.writeln()
builder.writeln('from . import types, functions')
builder.writeln()
# Create a constant variable to indicate which layer this is
builder.writeln('LAYER = {}'.format(
TLParser.find_layer(scheme_file))
)
builder.writeln()
# Then create the dictionary containing constructor_id: class
builder.writeln('tlobjects = {')
builder.current_indent += 1
# Fill the dictionary (0x1a2b3c4f: tl.full.type.path.Class)
for tlobject in tlobjects:
constructor = hex(tlobject.id)
if len(constructor) != 10:
# Make it a nice length 10 so it fits well
constructor = '0x' + constructor[2:].zfill(8)
builder.write('{}: '.format(constructor))
builder.write(
'functions' if tlobject.is_function else 'types')
if tlobject.namespace:
builder.write('.' + tlobject.namespace)
builder.writeln('.{},'.format(tlobject.class_name()))
builder.current_indent -= 1
builder.writeln('}')
@staticmethod
def _write_init_py(out_dir, depth, namespace_tlobjects, type_constructors):
# namespace_tlobjects: {'namespace', [TLObject]}
os.makedirs(out_dir, exist_ok=True)
for ns, tlobjects in namespace_tlobjects.items():
file = os.path.join(out_dir, ns + '.py' if ns else '__init__.py')
with open(file, 'w', encoding='utf-8') as f, \
SourceBuilder(f) as builder:
builder.writeln(AUTO_GEN_NOTICE)
# Both types and functions inherit from the TLObject class
# so they all can be serialized and sent, however, only the
# functions are "content_related".
builder.writeln(
'from {}.tl.tlobject import TLObject'.format('.' * depth)
)
builder.writeln('from typing import Optional, List, Union, TYPE_CHECKING')
# Add the relative imports to the namespaces,
# unless we already are in a namespace.
if not ns:
builder.writeln('from . import {}'.format(', '.join(
x for x in namespace_tlobjects.keys() if x
)))
# Import 'os' for those needing access to 'os.urandom()'
# Currently only 'random_id' needs 'os' to be imported,
# for all those TLObjects with arg.can_be_inferred.
builder.writeln('import os')
# Import struct for the .__bytes__(self) serialization
builder.writeln('import struct')
tlobjects.sort(key=lambda x: x.name)
type_names = set()
type_defs = []
# Find all the types in this file and generate type definitions
# based on the types. The type definitions are written to the
# file at the end.
for t in tlobjects:
if not t.is_function:
type_name = t.result
if '.' in type_name:
type_name = type_name[type_name.rindex('.'):]
if type_name in type_names:
continue
type_names.add(type_name)
constructors = type_constructors[type_name]
if not constructors:
pass
elif len(constructors) == 1:
type_defs.append('Type{} = {}'.format(
type_name, constructors[0].class_name()))
else:
type_defs.append('Type{} = Union[{}]'.format(
type_name, ','.join(c.class_name()
for c in constructors)))
imports = {}
primitives = ('int', 'long', 'int128', 'int256', 'string',
'date', 'bytes', 'true')
# Find all the types in other files that are used in this file
# and generate the information required to import those types.
for t in tlobjects:
for arg in t.args:
name = arg.type
if not name or name in primitives:
continue
import_space = '{}.tl.types'.format('.' * depth)
if '.' in name:
namespace = name.split('.')[0]
name = name.split('.')[1]
import_space += '.{}'.format(namespace)
if name not in type_names:
type_names.add(name)
if name == 'date':
imports['datetime'] = ['datetime']
continue
elif not import_space in imports:
imports[import_space] = set()
imports[import_space].add('Type{}'.format(name))
# Add imports required for type checking.
builder.writeln('if TYPE_CHECKING:')
for namespace, names in imports.items():
builder.writeln('from {} import {}'.format(
namespace, ', '.join(names)))
else:
builder.writeln('pass')
builder.end_block()
# Generate the class for every TLObject
for t in tlobjects:
TLGenerator._write_source_code(
t, builder, depth, type_constructors
)
builder.current_indent = 0
# Write the type definitions generated earlier.
builder.writeln('')
for line in type_defs:
builder.writeln(line)
@staticmethod
def _write_source_code(tlobject, builder, depth, type_constructors):
"""Writes the source code corresponding to the given TLObject
by making use of the 'builder' SourceBuilder.
Additional information such as file path depth and
the Type: [Constructors] must be given for proper
importing and documentation strings.
"""
builder.writeln()
builder.writeln()
builder.writeln('class {}(TLObject):'.format(tlobject.class_name()))
# Class-level variable to store its Telegram's constructor ID
builder.writeln('CONSTRUCTOR_ID = {}'.format(hex(tlobject.id)))
builder.writeln('SUBCLASS_OF_ID = {}'.format(
hex(crc32(tlobject.result.encode('ascii'))))
)
builder.writeln()
# Flag arguments must go last
args = [
a for a in tlobject.sorted_args()
if not a.flag_indicator and not a.generic_definition
]
# Convert the args to string parameters, flags having =None
args = [
(a.name if not a.is_flag and not a.can_be_inferred
else '{}=None'.format(a.name))
for a in args
]
# Write the __init__ function
if args:
builder.writeln(
'def __init__(self, {}):'.format(', '.join(args))
)
else:
builder.writeln('def __init__(self):')
# Now update args to have the TLObject arguments, _except_
# those which are calculated on send or ignored, this is
# flag indicator and generic definitions.
#
# We don't need the generic definitions in Python
# because arguments can be any type
args = [arg for arg in tlobject.args
if not arg.flag_indicator and
not arg.generic_definition]
if args:
# Write the docstring, to know the type of the args
builder.writeln('"""')
for arg in args:
if not arg.flag_indicator:
builder.writeln(':param {} {}:'.format(
arg.doc_type_hint(), arg.name
))
builder.current_indent -= 1 # It will auto-indent (':')
# We also want to know what type this request returns
# or to which type this constructor belongs to
builder.writeln()
if tlobject.is_function:
builder.write(':returns {}: '.format(tlobject.result))
else:
builder.write('Constructor for {}: '.format(tlobject.result))
constructors = type_constructors[tlobject.result]
if not constructors:
builder.writeln('This type has no constructors.')
elif len(constructors) == 1:
builder.writeln('Instance of {}.'.format(
constructors[0].class_name()
))
else:
builder.writeln('Instance of either {}.'.format(
', '.join(c.class_name() for c in constructors)
))
builder.writeln('"""')
builder.writeln('super().__init__()')
# Functions have a result object and are confirmed by default
if tlobject.is_function:
builder.writeln('self.result = None')
builder.writeln(
'self.content_related = True')
# Set the arguments
if args:
# Leave an empty line if there are any args
builder.writeln()
for arg in args:
if not arg.can_be_inferred:
builder.writeln('self.{0} = {0} # type: {1}'.format(
arg.name, arg.python_type_hint()))
continue
# Currently the only argument that can be
# inferred are those called 'random_id'
if arg.name == 'random_id':
# Endianness doesn't really matter, and 'big' is shorter
code = "int.from_bytes(os.urandom({}), 'big', signed=True)" \
.format(8 if arg.type == 'long' else 4)
if arg.is_vector:
# Currently for the case of "messages.forwardMessages"
# Ensure we can infer the length from id:Vector<>
if not next(
a for a in args if a.name == 'id').is_vector:
raise ValueError(
'Cannot infer list of random ids for ', tlobject
)
code = '[{} for _ in range(len(id))]'.format(code)
builder.writeln(
"self.random_id = random_id if random_id "
"is not None else {}".format(code)
)
else:
raise ValueError('Cannot infer a value for ', arg)
builder.end_block()
# Write the resolve(self, client, utils) method
if any(arg.type in AUTO_CASTS for arg in args):
builder.writeln('def resolve(self, client, utils):')
for arg in args:
ac = AUTO_CASTS.get(arg.type, None)
if ac:
TLGenerator._write_self_assign(builder, arg, ac)
builder.end_block()
# Write the to_dict(self) method
builder.writeln('def to_dict(self):')
builder.writeln('return {')
builder.current_indent += 1
base_types = ('string', 'bytes', 'int', 'long', 'int128',
'int256', 'double', 'Bool', 'true', 'date')
builder.write("'_': '{}'".format(tlobject.class_name()))
for arg in args:
builder.writeln(',')
builder.write("'{}': ".format(arg.name))
if arg.type in base_types:
if arg.is_vector:
builder.write('[] if self.{0} is None else self.{0}[:]'
.format(arg.name))
else:
builder.write('self.{}'.format(arg.name))
else:
if arg.is_vector:
builder.write(
'[] if self.{0} is None else [None '
'if x is None else x.to_dict() for x in self.{0}]'
.format(arg.name)
)
else:
builder.write(
'None if self.{0} is None else self.{0}.to_dict()'
.format(arg.name)
)
builder.writeln()
builder.current_indent -= 1
builder.writeln("}")
builder.end_block()
# Write the .__bytes__() function
builder.writeln('def __bytes__(self):')
# Some objects require more than one flag parameter to be set
# at the same time. In this case, add an assertion.
repeated_args = defaultdict(list)
for arg in tlobject.args:
if arg.is_flag:
repeated_args[arg.flag_index].append(arg)
for ra in repeated_args.values():
if len(ra) > 1:
cnd1 = ('(self.{0} or self.{0} is not None)'
.format(a.name) for a in ra)
cnd2 = ('(self.{0} is None or self.{0} is False)'
.format(a.name) for a in ra)
builder.writeln(
"assert ({}) or ({}), '{} parameters must all "
"be False-y (like None) or all me True-y'".format(
' and '.join(cnd1), ' and '.join(cnd2),
', '.join(a.name for a in ra)
)
)
builder.writeln("return b''.join((")
builder.current_indent += 1
# First constructor code, we already know its bytes
builder.writeln('{},'.format(repr(struct.pack('<I', tlobject.id))))
for arg in tlobject.args:
if TLGenerator.write_to_bytes(builder, arg, tlobject.args):
builder.writeln(',')
builder.current_indent -= 1
builder.writeln('))')
builder.end_block()
# Write the static from_reader(reader) function
builder.writeln('@staticmethod')
builder.writeln('def from_reader(reader):')
for arg in tlobject.args:
TLGenerator.write_read_code(
builder, arg, tlobject.args, name='_' + arg.name
)
builder.writeln('return {}({})'.format(
tlobject.class_name(), ', '.join(
'{0}=_{0}'.format(a.name) for a in tlobject.sorted_args()
if not a.flag_indicator and not a.generic_definition
)
))
# Only requests can have a different response that's not their
# serialized body, that is, we'll be setting their .result.
#
# The default behaviour is reading a TLObject too, so no need
# to override it unless necessary.
if tlobject.is_function and not TLGenerator._is_boxed(tlobject.result):
builder.end_block()
builder.writeln('def on_response(self, reader):')
TLGenerator.write_request_result_code(builder, tlobject)
@staticmethod
def _is_boxed(type_):
# https://core.telegram.org/mtproto/serialize#boxed-and-bare-types
# TL;DR; boxed types start with uppercase always, so we can use
# this to check whether everything in it is boxed or not.
#
        # The API always returns a boxed type, but it may be inside a Vector<>
        # or a namespace, and the Vector may have a non-boxed type. For this
        # reason we find the index of '<' or '.'. If neither is present
        # we will get -1, and the 0th char is always upper case, so it works.
# For Vector types and namespaces, it will check in the right place.
check_after = max(type_.find('<'), type_.find('.'))
return type_[check_after + 1].isupper()
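    # Illustrative evaluations of the rule above (worked out by hand, not taken
    # from the scheme):
    #   _is_boxed('Bool')                -> True   (0th char is upper case)
    #   _is_boxed('Vector<int>')         -> False  (char after '<' is lower case)
    #   _is_boxed('messages.StickerSet') -> True   (char after '.' is upper case)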
@staticmethod
def _write_self_assign(builder, arg, get_input_code):
"""Writes self.arg = input.format(self.arg), considering vectors"""
if arg.is_vector:
builder.write('self.{0} = [{1} for _x in self.{0}]'
.format(arg.name, get_input_code.format('_x')))
else:
builder.write('self.{} = {}'.format(
arg.name, get_input_code.format('self.' + arg.name)))
builder.writeln(
' if self.{} else None'.format(arg.name) if arg.is_flag else ''
)
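    # Illustrative output of the method above, assuming an InputPeer argument
    # named 'peer' and a vector of InputUser named 'users' (field names are
    # hypothetical; the cast templates come from AUTO_CASTS):
    #   self.peer = utils.get_input_peer(client.get_input_entity(self.peer))
    #   self.users = [utils.get_input_user(client.get_input_entity(_x)) for _x in self.users]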
@staticmethod
def get_file_name(tlobject, add_extension=False):
"""Gets the file name in file_name_format.py for the given TLObject"""
# Courtesy of http://stackoverflow.com/a/1176023/4759433
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', tlobject.name)
result = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
if add_extension:
return result + '.py'
else:
return result
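    # Illustrative conversions performed by the two substitutions above:
    #   'InputPeerUser'     -> 'Input_Peer_User'     -> 'input_peer_user'
    #   'GetHistoryRequest' -> 'Get_History_Request' -> 'get_history_request'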
@staticmethod
def write_to_bytes(builder, arg, args, name=None):
"""
Writes the .__bytes__() code for the given argument
:param builder: The source code builder
:param arg: The argument to write
        :param args: All the other arguments of the same TLObject's __bytes__.
                     This is required to determine the flags value
        :param name: The name of the argument. Defaults to "self.argname".
                     This argument is optional because it's only required when
                     writing the elements of a Vector<>
"""
if arg.generic_definition:
return # Do nothing, this only specifies a later type
if name is None:
name = 'self.{}'.format(arg.name)
# The argument may be a flag, only write if it's not None AND
# if it's not a True type.
# True types are not actually sent, but instead only used to
# determine the flags.
if arg.is_flag:
if arg.type == 'true':
return # Exit, since True type is never written
elif arg.is_vector:
# Vector flags are special since they consist of 3 values,
# so we need an extra join here. Note that empty vector flags
# should NOT be sent either!
builder.write("b'' if {0} is None or {0} is False "
"else b''.join((".format(name))
else:
builder.write("b'' if {0} is None or {0} is False "
"else (".format(name))
if arg.is_vector:
if arg.use_vector_id:
# vector code, unsigned 0x1cb5c415 as little endian
builder.write(r"b'\x15\xc4\xb5\x1c',")
builder.write("struct.pack('<i', len({})),".format(name))
# Cannot unpack the values for the outer tuple through *[(
# since that's a Python >3.5 feature, so add another join.
builder.write("b''.join(")
# Temporary disable .is_vector, not to enter this if again
# Also disable .is_flag since it's not needed per element
old_flag = arg.is_flag
arg.is_vector = arg.is_flag = False
TLGenerator.write_to_bytes(builder, arg, args, name='x')
arg.is_vector = True
arg.is_flag = old_flag
builder.write(' for x in {})'.format(name))
elif arg.flag_indicator:
# Calculate the flags with those items which are not None
if not any(f.is_flag for f in args):
# There's a flag indicator, but no flag arguments so it's 0
builder.write(r"b'\0\0\0\0'")
else:
builder.write("struct.pack('<I', ")
builder.write(
' | '.join('(0 if {0} is None or {0} is False else {1})'
.format('self.{}'.format(flag.name),
1 << flag.flag_index)
for flag in args if flag.is_flag)
)
builder.write(')')
elif 'int' == arg.type:
# struct.pack is around 4 times faster than int.to_bytes
builder.write("struct.pack('<i', {})".format(name))
elif 'long' == arg.type:
builder.write("struct.pack('<q', {})".format(name))
elif 'int128' == arg.type:
builder.write("{}.to_bytes(16, 'little', signed=True)".format(name))
elif 'int256' == arg.type:
builder.write("{}.to_bytes(32, 'little', signed=True)".format(name))
elif 'double' == arg.type:
builder.write("struct.pack('<d', {})".format(name))
elif 'string' == arg.type:
builder.write('TLObject.serialize_bytes({})'.format(name))
elif 'Bool' == arg.type:
# 0x997275b5 if boolean else 0xbc799737
builder.write(
r"b'\xb5ur\x99' if {} else b'7\x97y\xbc'".format(name)
)
elif 'true' == arg.type:
pass # These are actually NOT written! Only used for flags
elif 'bytes' == arg.type:
builder.write('TLObject.serialize_bytes({})'.format(name))
elif 'date' == arg.type: # Custom format
builder.write('TLObject.serialize_datetime({})'.format(name))
else:
# Else it may be a custom type
builder.write('bytes({})'.format(name))
if arg.is_flag:
builder.write(')')
if arg.is_vector:
builder.write(')') # We were using a tuple
return True # Something was written
@staticmethod
def write_read_code(builder, arg, args, name):
"""
Writes the read code for the given argument, setting the
arg.name variable to its read value.
:param builder: The source code builder
:param arg: The argument to write
        :param args: All the other arguments of the same TLObject's on_send.
                     This is required to determine the flags value
        :param name: The name of the argument. Defaults to "self.argname".
                     This argument is optional because it's only required when
                     writing the elements of a Vector<>
"""
if arg.generic_definition:
return # Do nothing, this only specifies a later type
# The argument may be a flag, only write that flag was given!
was_flag = False
if arg.is_flag:
# Treat 'true' flags as a special case, since they're true if
# they're set, and nothing else needs to actually be read.
if 'true' == arg.type:
builder.writeln(
'{} = bool(flags & {})'.format(name, 1 << arg.flag_index)
)
return
was_flag = True
builder.writeln('if flags & {}:'.format(
1 << arg.flag_index
))
# Temporary disable .is_flag not to enter this if
# again when calling the method recursively
arg.is_flag = False
if arg.is_vector:
if arg.use_vector_id:
# We have to read the vector's constructor ID
builder.writeln("reader.read_int()")
builder.writeln('{} = []'.format(name))
builder.writeln('for _ in range(reader.read_int()):')
# Temporary disable .is_vector, not to enter this if again
arg.is_vector = False
TLGenerator.write_read_code(builder, arg, args, name='_x')
builder.writeln('{}.append(_x)'.format(name))
arg.is_vector = True
elif arg.flag_indicator:
# Read the flags, which will indicate what items we should read next
builder.writeln('flags = reader.read_int()')
builder.writeln()
elif 'int' == arg.type:
builder.writeln('{} = reader.read_int()'.format(name))
elif 'long' == arg.type:
builder.writeln('{} = reader.read_long()'.format(name))
elif 'int128' == arg.type:
builder.writeln(
'{} = reader.read_large_int(bits=128)'.format(name)
)
elif 'int256' == arg.type:
builder.writeln(
'{} = reader.read_large_int(bits=256)'.format(name)
)
elif 'double' == arg.type:
builder.writeln('{} = reader.read_double()'.format(name))
elif 'string' == arg.type:
builder.writeln('{} = reader.tgread_string()'.format(name))
elif 'Bool' == arg.type:
builder.writeln('{} = reader.tgread_bool()'.format(name))
elif 'true' == arg.type:
# Arbitrary not-None value, don't actually read "true" flags
builder.writeln('{} = True'.format(name))
elif 'bytes' == arg.type:
builder.writeln('{} = reader.tgread_bytes()'.format(name))
elif 'date' == arg.type: # Custom format
builder.writeln('{} = reader.tgread_date()'.format(name))
else:
# Else it may be a custom type
if not arg.skip_constructor_id:
builder.writeln('{} = reader.tgread_object()'.format(name))
else:
# Import the correct type inline to avoid cyclic imports.
# There may be better solutions so that we can just access
# all the types before the files have been parsed, but I
# don't know of any.
sep_index = arg.type.find('.')
if sep_index == -1:
ns, t = '.', arg.type
else:
ns, t = '.' + arg.type[:sep_index], arg.type[sep_index+1:]
class_name = TLObject.class_name_for(t)
# There would be no need to import the type if we're in the
# file with the same namespace, but since it does no harm
# and we don't have information about such thing in the
# method we just ignore that case.
builder.writeln('from {} import {}'.format(ns, class_name))
builder.writeln('{} = {}.from_reader(reader)'.format(
name, class_name
))
# End vector and flag blocks if required (if we opened them before)
if arg.is_vector:
builder.end_block()
if was_flag:
builder.current_indent -= 1
builder.writeln('else:')
builder.writeln('{} = None'.format(name))
builder.current_indent -= 1
# Restore .is_flag
arg.is_flag = True
@staticmethod
def write_request_result_code(builder, tlobject):
"""
Writes the receive code for the given function
:param builder: The source code builder
:param tlobject: The TLObject for which the 'self.result = '
will be written
"""
if tlobject.result.startswith('Vector<'):
# Vector results are a bit special since they can also be composed
# of integer values and such; however, the result of requests is
# not parsed as arguments are and it's a bit harder to tell which
# is which.
if tlobject.result == 'Vector<int>':
builder.writeln('reader.read_int() # Vector ID')
builder.writeln('count = reader.read_int()')
builder.writeln(
'self.result = [reader.read_int() for _ in range(count)]'
)
elif tlobject.result == 'Vector<long>':
builder.writeln('reader.read_int() # Vector ID')
builder.writeln('count = reader.read_long()')
builder.writeln(
'self.result = [reader.read_long() for _ in range(count)]'
)
else:
builder.writeln('self.result = reader.tgread_vector()')
else:
builder.writeln('self.result = reader.tgread_object()')
|
[
"os.remove",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"struct.pack",
"collections.defaultdict",
"shutil.rmtree",
"os.path.join",
"re.sub"
] |
[((733, 770), 'os.path.join', 'os.path.join', (['self.output_dir', '*paths'], {}), '(self.output_dir, *paths)\n', (745, 770), False, 'import os\n'), ((862, 882), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (876, 882), False, 'import os\n'), ((2089, 2106), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2100, 2106), False, 'from collections import defaultdict\n'), ((2133, 2150), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2144, 2150), False, 'from collections import defaultdict\n'), ((2321, 2338), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2332, 2338), False, 'from collections import defaultdict\n'), ((4809, 4844), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (4820, 4844), False, 'import os\n'), ((16607, 16624), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16618, 16624), False, 'from collections import defaultdict\n'), ((20461, 20513), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'tlobject.name'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', tlobject.name)\n", (20467, 20513), False, 'import re\n'), ((899, 918), 'os.path.isdir', 'os.path.isdir', (['file'], {}), '(file)\n', (912, 918), False, 'import os\n'), ((4922, 4980), 'os.path.join', 'os.path.join', (['out_dir', "(ns + '.py' if ns else '__init__.py')"], {}), "(out_dir, ns + '.py' if ns else '__init__.py')\n", (4934, 4980), False, 'import os\n'), ((936, 955), 'shutil.rmtree', 'shutil.rmtree', (['file'], {}), '(file)\n', (949, 955), False, 'import shutil\n'), ((990, 1005), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (999, 1005), False, 'import os\n'), ((20530, 20572), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (20536, 20572), False, 'import re\n'), ((17564, 17594), 'struct.pack', 'struct.pack', (['"""<I"""', 'tlobject.id'], {}), "('<I', tlobject.id)\n", (17575, 17594), False, 'import struct\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.utils.functional import lazy
from .models import QuerySet
from opps.core.forms import model_choices
class QuerySetAdminForm(forms.ModelForm):
model = forms.ChoiceField(choices=lazy(model_choices, tuple)())
class Meta:
model = QuerySet
|
[
"django.utils.functional.lazy"
] |
[((266, 292), 'django.utils.functional.lazy', 'lazy', (['model_choices', 'tuple'], {}), '(model_choices, tuple)\n', (270, 292), False, 'from django.utils.functional import lazy\n')]
|
# Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import time
from enum import Enum
from util import skvbc as kvbc
import trio
import random
from functools import wraps
from util.skvbc_exceptions import(
ConflictingBlockWriteError,
StaleReadError,
NoConflictError,
InvalidReadError,
PhantomBlockError
)
MAX_LOOKBACK=10
def verify_linearizability(pre_exec_enabled=False, no_conflicts=False):
"""
    Creates a tracker and provides it to the decorated method.
    At the end of the method, it checks the linearizability of the resulting history.
"""
def decorator(async_fn):
@wraps(async_fn)
async def wrapper(*args, **kwargs):
if 'disable_linearizability_checks' in kwargs:
kwargs.pop('disable_linearizability_checks')
bft_network = kwargs['bft_network']
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
tracker = PassThroughSkvbcTracker(skvbc, bft_network, pre_exec_enabled, no_conflicts)
await async_fn(*args, **kwargs, tracker=tracker)
else:
bft_network = kwargs['bft_network']
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
init_state = skvbc.initial_state()
tracker = SkvbcTracker(init_state, skvbc, bft_network, pre_exec_enabled, no_conflicts)
await async_fn(*args, **kwargs, tracker=tracker)
await tracker.fill_missing_blocks_and_verify()
return wrapper
return decorator
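# Hedged usage sketch of the decorator above; the test body is illustrative.
# Only the tracker methods shown (send_read / handle_read_reply) are defined in
# this module, random_client() and client.read() are assumed helpers.
#
# @verify_linearizability()
# async def test_read_linearizes(bft_network, tracker):
#     client = bft_network.random_client()
#     tracker.send_read(client.client_id, seq_num=1, readset=['k'])
#     kvpairs = await client.read(...)
#     tracker.handle_read_reply(client.client_id, 1, kvpairs)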
class SkvbcWriteRequest:
"""
A write request sent to an Skvbc cluster. A request may or may not complete.
"""
def __init__(self, client_id, seq_num, readset, writeset, read_block_id=0):
self.timestamp = time.monotonic()
self.client_id = client_id
self.seq_num = seq_num
self.readset = readset
self.writeset = writeset
self.read_block_id = read_block_id
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' timestamp={self.timestamp}\n'
f' client_id={self.client_id}\n'
f' seq_num={self.seq_num}\n'
f' readset={self.readset}\n'
f' writeset={self.writeset}\n'
f' read_block_id={self.read_block_id}\n')
class SkvbcReadRequest:
"""
A read request sent to an Skvbc cluster. A request may or may not complete.
"""
def __init__(self, client_id, seq_num, readset, read_block_id=0):
self.timestamp = time.monotonic()
self.client_id = client_id
self.seq_num = seq_num
self.readset = readset
self.read_block_id = read_block_id
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' timestamp={self.timestamp}\n'
f' client_id={self.client_id}\n'
f' seq_num={self.seq_num}\n'
f' readset={self.readset}\n'
f' read_block_id={self.read_block_id}\n')
class SkvbcGetLastBlockReq:
"""
A GET_LAST_BLOCK request sent to an skvbc cluster. A request may or may not
complete.
"""
def __init__(self, client_id, seq_num):
self.timestamp = time.monotonic()
self.client_id = client_id
self.seq_num = seq_num
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' timestamp={self.timestamp}\n'
f' client_id={self.client_id}\n'
f' seq_num={self.seq_num}\n')
class SkvbcWriteReply:
"""A reply to an outstanding write request sent to an Skvbc cluster."""
def __init__(self, client_id, seq_num, reply):
self.timestamp = time.monotonic()
self.client_id = client_id
self.seq_num = seq_num
self.reply = reply
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' timestamp={self.timestamp}\n'
f' client_id={self.client_id}\n'
f' seq_num={self.seq_num}\n'
f' reply={self.reply}\n')
class SkvbcReadReply:
"""A reply to an outstanding read request sent to an Skvbc cluster."""
def __init__(self, client_id, seq_num, kvpairs):
self.timestamp = time.monotonic()
self.client_id = client_id
self.seq_num = seq_num
self.kvpairs = kvpairs
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' timestamp={self.timestamp}\n'
f' client_id={self.client_id}\n'
f' seq_num={self.seq_num}\n'
f' kvpairs={self.kvpairs}\n')
class SkvbcGetLastBlockReply:
"""
A reply to an outstanding get last block request sent to an Skvbc cluster.
"""
def __init__(self, client_id, seq_num, reply):
self.timestamp = time.monotonic()
self.client_id = client_id
self.seq_num = seq_num
self.reply = reply
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' timestamp={self.timestamp}\n'
f' client_id={self.client_id}\n'
f' seq_num={self.seq_num}\n'
f' reply={self.reply}\n')
class Result(Enum):
"""
Whether an operation succeeded, failed, or the result is unknown
"""
WRITE_SUCCESS = 1
WRITE_FAIL = 2
UNKNOWN_WRITE = 3
UNKNOWN_READ = 4
READ_REPLY = 5
class CompletedRead:
def __init__(self, causal_state, kvpairs):
self.causal_state = causal_state
self.kvpairs = kvpairs
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' causal_state={self.causal_state}\n'
f' kvpairs={self.kvpairs}\n')
class CausalState:
"""Relevant state of the tracker before a request is started"""
def __init__(self,
req_index,
last_known_block,
last_consecutive_block,
missing_intermediate_blocks,
kvpairs):
self.req_index = req_index
self.last_known_block = last_known_block
self.last_consecutive_block = last_consecutive_block
self.missing_intermediate_blocks = missing_intermediate_blocks
# KV pairs contain the value up keys up until last_consecutive_block
self.kvpairs = kvpairs
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' req_index={self.req_index}\n'
f' last_known_block={self.last_known_block}\n'
f' last_consecutive_block={self.last_consecutive_block}\n'
f' '
f'missing_intermediate_blocks={self.missing_intermediate_blocks}\n'
f' causal state kvpairs={self.kvpairs}\n')
class ConcurrentValue:
"""Track the state for a request / reply in self.concurrent"""
def __init__(self, is_read):
if is_read:
self.result = Result.UNKNOWN_READ
else:
self.result = Result.UNKNOWN_WRITE
# Only used in writes
# Set only when writes succeed.
self.written_block_id = None
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' result={self.result}\n'
f' written_block_id={self.written_block_id}\n')
class Block:
def __init__(self, kvpairs, req_index=None):
self.kvpairs = kvpairs
self.req_index = req_index
def __repr__(self):
return (f'{self.__class__.__name__}:\n'
f' kvpairs={self.kvpairs}\n'
f' req_index={self.req_index}\n')
class Status:
"""
Status about the order of writes and reads.
This is useful for debugging linearizability check failures.
"""
def __init__(self, config):
self.config = config
self.start_time = time.monotonic()
self.end_time = 0
self.last_client_reply = 0
self.client_timeouts = {}
self.client_replies = {}
def record_client_reply(self, client_id):
self.last_client_reply = time.monotonic()
count = self.client_replies.get(client_id, 0)
self.client_replies[client_id] = count + 1
def record_client_timeout(self, client_id):
count = self.client_timeouts.get(client_id, 0)
self.client_timeouts[client_id] = count + 1
def __str__(self):
return (f'{self.__class__.__name__}:\n'
f' config={self.config}\n'
f' test_duration={self.end_time - self.start_time} seconds\n'
f' time_since_last_client_reply='
f'{self.end_time - self.last_client_reply} seconds\n'
f' client_timeouts={self.client_timeouts}\n'
f' client_replies={self.client_replies}\n')
class SkvbcTracker:
"""
Track requests, expected and actual responses from SimpleKVBC test
clusters, and then allow verification of the linearizability of responses.
    The SkvbcTracker is used by a test to record messages sent to, and replies
    received from, a cluster of SimpleKVBC TesterReplica nodes. Every request
sent and reply received is recorded in `self.history`, and indexes into this
    history for requests are frequently referred to by other parts of this
code.
By tracking outstanding requests in `self.outstanding` it is able to
determine which requests are concurrent with which other requests, and track
them in `self.concurrent`. The tracker also records a set of known blocks in
`self.blocks` that it learns when ConditionalWrite replies are successful.
Sometimes replies do not arrive for requests, and therefore the requests are never
removed from self.outstanding. Such requests are considered concurrent with
all future requests.
Additionally, the tracker records all completed reads and failed writes so
that they can be put into a total order with the blocks and checked for
correctness (i.e. linearized).
A test can send many requests to a cluster while it is also crashing nodes,
creating partitions, instigating view changes, playing with clocks, and
performing generally malicious actions. These failures can trigger missing
replies as well as anomalies in the record of blocks themselves. The goal of
the test is to try to trigger these anomalies via contentious writes and
failures, such that any flaws in the implementation of concord-sbft can
revealed when the tracker performs verification. When a failure is found the
tracker state can be dumped along with the specific exception thrown to help
developers reason about bugs in the code.
Any complexity in this code stems primarily from the requirements and
implementation of the verification procedure. Unlike general, dynamic
model based linearizability checkers like
[knossos](https://github.com/jepsen-io/knossos) or
[porcupine](https://github.com/anishathalye/porcupine), which attempt an
    NP-Hard search for correct linearizations solely from requests and responses,
the SkvbcTracker relies on the total ordering of blocks provided by the
TesterReplica implementation, and can perform correctness checks in linear
time. However, the tester also records the complete history of requests and
replies and can be used to generate a compatible output file that can then
be verified using either knossos or porcupine.
Every time a successful conditional write reply is received it includes the
block number that it generated. This allows us to create a definitive total
order for all blocks generated from requests that the tracker knows the
replies of. However, we don't know the value of all blocks, because some
replies may have been dropped. Before a test calls the `verify` method, it
must retrieve and inform the tracker about any missing blocks. However, the
    test doesn't know which blocks are missing. The test will therefore call the
`get_missing_blocks` method with the last_block_id of the cluster, that it
retrieves after it is done sending read and write requests. The
`get_missing_blocks` method will return a set of blocks which the test
should retrieve from the replicas and then inform the tracker about with a
call to `fill_missing_blocks`. After `fill_missing_blocks` is called the
tracker will have a total order of all blocks in the system, thus obviating
the need to perform an NP-hard search to find a write order that also
satisfies concurrent reads and failed responses.
An astute reader will note that we are trusting the TesterReplica cluster to
tell us about the values of certain blocks, but that cluster is also the
system under test that we are attempting to validate. At an absolute
minimum, we want to make sure that any filled blocks could have been
generated by outstanding requests. Therefore, the first step in verification
is to call `_match_filled_blocks`. If any blocks could not have been
generated by the outstanding requests, i.e. requests without replies, than a
`PhantomBlockError` exception is raised. We could also try to order
identical requests and blocks and determine if they could have successfully
created those blocks based on their readsets not conflicting with writesets
in the block history. However, this is an expensive search, that is probably
similar to general linearizability testing in its complexity, and in long
histories with many missing blocks may prolong all tests with little benefit. If we
determine the need we can add this procedure later. The assumption here is that
any incorrectness in the missing blocks will be detected via the inability
of reads and failed requests to serialize, or be the result of unmatched
requests. In other words, if the missing blocks are incorrect, other parts
of the chain are likely incorrect and will be detected from the remaining
parts of the verification procedure.
The remaining parts of the verification procedure are documented in their
respective methods, but will briefly be described here.
`_verify_successful_writes` ensures that the readset in the write request
does not intersect with the writesets in other requests that created blocks
up until the creation of the block by this successful write. In essence, it
is verifying compare and swap behavior, using the block id of the readset as
a version.
`_linearize_reads` ensures that the returned values of the read could have
been returned from the *state* of the kvstore before or after any concurrent
requests with the read.
`_linearize_write_failures` works similarly to `_linearize_reads`, in that it
looks at concurrent write requests to determine if there was an anomaly. The
anomaly in this case would be if the failed request had a readset that
didn't actually intersect with any writesets in concurrent requests. This
would mean that there wasn't actually a conflict, and therefore the write
should have succeeded. It's important to note here that we are only checking
explicit failures returned from the cluster that are due to conflict errors
in the replica's block state. We do not try to show that timeouts should not
have failed. Any request that times out is not recorded as having a reply
and remains in `self.outstanding`.
It's important to note that the verification procedure returns the first
error it finds in an exception. There may be other errors that were not
reported but existed. In the future we may wish to change this code to track
all (or most) errors and report a set of exceptions to the caller, to enable better
debugging.
One more check that may be useful is to take the complete chain in
`self.blocks` and compare it to all blocks in the cluster at the end of the
test run. The values should be identical. This can be an expensive check in
clusters with lots of blocks, but we may want to add it as an optional check
in the future.
"""
def __init__(self, initial_kvpairs={}, skvbc=None, bft_network=None, pre_exec_all=False, no_conflicts=False):
# If this flag is set to True, it means that all the tracker requests will
# go through the pre_execution mechanism
self.pre_exec_all = pre_exec_all
# If set to True, it means that we always send an empty read set in the bft write request, in order to prevent
# conflicts
self.no_conflicts = no_conflicts
# A partial order of all requests (SkvbcWriteRequest | SkvbcReadRequest)
# issued against SimpleKVBC. History tracks requests and responses. A
# happens-before relationship exists between responses and requests
# launched after those responses.
self.history = []
# All currently outstanding requests:
# (client_id, seq_num) -> CausalState
self.outstanding = {}
# All completed reads by index into history -> CompletedRead
self.completed_reads = {}
# All failed writes by index into history -> CausalState
self.failed_writes = {}
# A set of all concurrent requests and their results for each request in
# history
# index -> dict{index, ConcurrentValue}
self.concurrent = {}
# All blocks and their kv data based on responses
# Each known block is mapped from block id to a Block
self.blocks = {}
# The value of all keys at last_consecutive_block
self.kvpairs = initial_kvpairs if initial_kvpairs is not None else {}
self.last_consecutive_block = 0
# The block last received in a write
self.last_known_block = 0
# Blocks that get filled in by the call to fill_missing_blocks
# These blocks were created by write requests that never got replies.
self.filled_blocks = {}
self.skvbc = skvbc
self.bft_network = bft_network
if self.bft_network is not None:
self.status = Status(bft_network.config)
def send_write(self, client_id, seq_num, readset, writeset, read_block_id):
"""Track the send of a write request"""
req = SkvbcWriteRequest(
client_id, seq_num, readset, writeset, read_block_id)
self._send_req(req, is_read=False)
def send_read(self, client_id, seq_num, readset):
"""
Track the send of a read request.
Always get the latest value. We are trying to linearize requests, so we
want a real-time ordering which requires getting the latest values.
"""
req = SkvbcReadRequest(client_id, seq_num, readset)
self._send_req(req, is_read=True)
def _send_req(self, req, is_read):
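# Append the request to the history, record which outstanding requests are
# concurrent with it, and snapshot the causal state (blocks and kv pairs
# known at send time) so replies and failures can be checked later.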
self.history.append(req)
index = len(self.history) - 1
self._update_concurrent_requests(index, is_read)
cs = CausalState(index,
self.last_known_block,
self.last_consecutive_block,
self._count_non_consecutive_blocks(),
self.kvpairs.copy())
self.outstanding[(req.client_id, req.seq_num)] = cs
def handle_write_reply(self, client_id, seq_num, reply):
"""
Match a write reply with its outstanding request.
Check for consistency violations and raise an exception if found.
"""
rpy = SkvbcWriteReply(client_id, seq_num, reply)
self.history.append(rpy)
req, req_index = self._get_matching_request(rpy)
if reply.success:
if reply.last_block_id in self.blocks:
# This block_id has already been written!
block = self.blocks[reply.last_block_id]
raise ConflictingBlockWriteError(reply.last_block_id, block, req)
else:
self._record_concurrent_write_success(req_index,
rpy,
reply.last_block_id)
self.blocks[reply.last_block_id] = Block(req.writeset, req_index)
if reply.last_block_id > self.last_known_block:
self.last_known_block = reply.last_block_id
# Update consecutive kvpairs
if reply.last_block_id == self.last_consecutive_block + 1:
self.last_consecutive_block += 1
self.kvpairs.update(req.writeset)
# Did we already have the next consecutive blocks?
while True:
block = self.blocks.get(self.last_consecutive_block + 1)
if block is None:
break
self.last_consecutive_block += 1
self.kvpairs.update(block.kvpairs)
else:
self._record_concurrent_write_failure(req_index, rpy)
def handle_read_reply(self, client_id, seq_num, kvpairs):
"""
Get a read reply and ensure that it linearizes with the current known
concurrent replies.
"""
rpy = SkvbcReadReply(client_id, seq_num, kvpairs)
req, req_index = self._get_matching_request(rpy)
self.history.append(rpy)
self._record_read_reply(req_index, rpy)
def get_missing_blocks(self, last_block_id):
"""
Retrieve the set of missing blocks.
This is called during verification so that the test can retrieve these
blocks and call self.fill_missing_blocks().
After missing blocks are filled in, successful reads can be linearized.
"""
missing_blocks = set()
for i in range(self.last_consecutive_block + 1, self.last_known_block):
if self.blocks.get(i) is None:
missing_blocks.add(i)
# Include last_block_id if not already known
for i in range(self.last_known_block + 1, last_block_id + 1):
missing_blocks.add(i)
print(f'{len(missing_blocks)} missing blocks found.')
return missing_blocks
def fill_missing_blocks(self, missing_blocks):
"""
Add all missing blocks to self.blocks
Note that these blocks will not have a matching req_index, since we never
received a reply for the requests that created them. In some histories it's
not possible to identify an unambiguous request, since there may be
multiple possible requests that could have correctly generated the
block. Rather than trying to match the requests to the missing blocks,
we just assume the missing blocks are correct for now, and use the full
block history to verify successful conditional writes and reads.
"""
for block_id, kvpairs in missing_blocks.items():
self.blocks[block_id] = Block(kvpairs)
if block_id > self.last_known_block:
self.last_known_block = block_id
self.filled_blocks = missing_blocks
print(f'{len(missing_blocks)} missing blocks filled.')
def verify(self):
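# Run the verification steps described in the class docstring, in order:
# match filled blocks to unreplied writes, verify conditional writes,
# then linearize reads and explicit write failures.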
self._match_filled_blocks()
self._verify_successful_writes()
self._linearize_reads()
self._linearize_write_failures()
def _match_filled_blocks(self):
"""
For every filled block, identify an outstanding write request with a
matching writeset.
If there isn't an outstanding request that could have generated the
block raise a PhantomBlockError.
"""
# Req/block_id pairs
matched_blocks = []
write_requests = self._get_all_outstanding_write_requests()
for block_id, block_kvpairs in self.filled_blocks.items():
unmatched = []
success = False
for _ in range(0, len(write_requests)):
req = write_requests.pop()
if req.writeset == block_kvpairs:
matched_blocks.append((req, block_id))
success = True
break
else:
unmatched.append(req)
if not success:
raise PhantomBlockError(block_id,
block_kvpairs,
matched_blocks,
unmatched)
write_requests.extend(unmatched)
def _get_all_outstanding_write_requests(self):
writes = []
for causal_state in self.outstanding.values():
req = self.history[causal_state.req_index]
if isinstance(req, SkvbcWriteRequest):
writes.append(req)
return writes
def _verify_successful_writes(self):
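# For every block whose creating write received a successful reply, check
# the compare-and-swap condition against the blocks written since the
# request's read version.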
for i in range(1, self.last_known_block+1):
req_index = self.blocks[i].req_index
if req_index is not None:
# A reply was received for this request that created the block
req = self.history[req_index]
self._verify_successful_write(i, req)
def _linearize_write_failures(self):
"""
Go through all write failures, and determine if they should have failed
due to conflict.
The failure check involves looking for write conflicts based on the
causal state at the time the failed request was issued, and any
succeeding concurrent writes.
If no concurrent writes had writesets that intersected with the readset
of the failed write, then that write should have succeeded. In this case
we raise a NoConflictError.
Note that we only count failures explicitly returned from skvbc, i.e.
those where writes returned success = false due to conflict. Timeouts
remain in outstanding requests, since we don't know whether they would
have succeeded or failed.
"""
for req_index, causal_state in self.failed_writes.items():
blocks_to_check = self._num_blocks_to_linearize_over(req_index,
causal_state)
failed_req = self.history[req_index]
# Check for writeset intersection at every block from the block
# after the readset until the last possible concurrently generated
# block.
success = False
for i in range(failed_req.read_block_id + 1,
causal_state.last_known_block + blocks_to_check + 1):
writeset = set(self.blocks[i].kvpairs.keys())
if len(failed_req.readset.intersection(writeset)) != 0:
# We found a block that conflicts. We must
# assume that failed_req was failed correctly.
success = True
break
if not success:
# We didn't find any conflicting blocks.
# failed_req should have succeeded!
raise NoConflictError(failed_req, causal_state)
def _linearize_reads(self):
"""
At this point, we should know the kv pairs of all blocks.
Attempt to find linearization points for all reads in
self.completed_reads.
If a read cannot be linearized, then raise an exception.
"""
for req_index, completed_read in self.completed_reads.items():
cs = completed_read.causal_state
kv = cs.kvpairs.copy()
# We must check that the read linearizes after
# causal_state.last_known_block, since it must have started after
# that. Build up the kv state until last_known_block.
for block_id in range(cs.last_consecutive_block + 1,
cs.last_known_block + 1):
kv.update(self.blocks[block_id].kvpairs)
blocks_to_check = self._num_blocks_to_linearize_over(req_index, cs)
success = False
for i in range(cs.last_known_block,
cs.last_known_block + blocks_to_check + 1):
if i != cs.last_known_block:
kv.update(self.blocks[i].kvpairs)
if self._read_is_valid(kv, completed_read.kvpairs):
# The read linearizes here
success = True
break
if not success:
raise InvalidReadError(completed_read,
self.concurrent[req_index])
def _num_blocks_to_linearize_over(self, req_index, causal_state):
"""
The number of blocks after causal_state.last_known_block that we must
iterate over to see if a read or write failure can linearize after each
of the blocks.
"""
cs = causal_state
max_concurrent = self._max_possible_concurrent_writes(req_index)
concurrent_remaining = max_concurrent - cs.missing_intermediate_blocks
total_remaining = self.last_known_block - cs.last_known_block
return min(concurrent_remaining, total_remaining)
def _read_is_valid(self, kv_state, read_kvpairs):
"""Return if a read of read_kvpairs is possible given kv_state."""
for k, v in read_kvpairs.items():
if kv_state.get(k) != v:
return False
return True
def _max_possible_concurrent_writes(self, req_index):
"""
Return the maximum possible number of concurrent writes.
This includes writes that returned successfully, as well as writes with no
reply that could have generated missing blocks.
"""
count = 0
for val in self.concurrent[req_index].values():
if val.result == Result.WRITE_SUCCESS or \
val.result == Result.UNKNOWN_WRITE:
count += 1
return count
def _update_concurrent_requests(self, index, is_read):
"""
Set the concurrent requests for this request to a dictionary of all
indexes into history in self.outstanding mapped to a ConcurrentValue.
Also update all concurrent requests to include this request in their
concurrent dicts.
"""
self.concurrent[index] = {}
for causal_state in self.outstanding.values():
i = causal_state.req_index
is_read_outstanding = isinstance(self.history[i], SkvbcReadRequest)
# Add the outstanding request to this request's concurrent dicts
self.concurrent[index][i] = ConcurrentValue(is_read_outstanding)
# Add this request to the concurrent dicts of each outstanding req
self.concurrent[i][index] = ConcurrentValue(is_read)
def _verify_successful_write(self, written_block_id, req):
"""
Ensure that the block at written_block_id should have been written by
req.
Check that for each key in the readset, there have been no writes to
those keys for each block after the block version in the conditional
write up to, but not including this block. An example of failure is:
* We read block id = X
* We write block id = X + 2
* We notice that block id X + 1 has written a key in the readset of
this request that created block X + 2.
Note that we may have unknown blocks due to missing responses. We just
skip these blocks, as we can't tell if there's a conflict or not. We
have to assume there isn't a conflict in this case.
If there is a conflicting block then there is a bug in the consensus
algorithm, and we raise a StaleReadError.
"""
for i in range(req.read_block_id + 1, written_block_id):
if i not in self.blocks:
# Ensure we have learned about this block.
# Move on if we have not.
continue
block = self.blocks[i]
# If the writeset of the request that created an intermediate block
# intersects the readset of this request, then we have a conflict.
if len(req.readset.intersection(set(block.kvpairs.keys()))) != 0:
raise StaleReadError(req.read_block_id, i, written_block_id)
def _record_concurrent_write_success(self, req_index, rpy, block_id):
"""Inform all concurrent requests that this request succeeded."""
# We don't need the causal state for verification on write successes
del self.outstanding[(rpy.client_id, rpy.seq_num)]
val = ConcurrentValue(is_read=False)
val.result = Result.WRITE_SUCCESS
val.written_block_id = block_id
for i in self.concurrent[req_index].keys():
self.concurrent[i][req_index] = val
def _record_concurrent_write_failure(self, req_index, rpy):
"""Inform all concurrent requests that this request failed."""
causal_state = self.outstanding.pop((rpy.client_id, rpy.seq_num))
self.failed_writes[req_index] = causal_state
val = ConcurrentValue(is_read=False)
val.result = Result.WRITE_FAIL
for i in self.concurrent[req_index].keys():
self.concurrent[i][req_index] = val
def _record_read_reply(self, req_index, rpy):
"""Inform all concurrent requests about a read reply"""
causal_state = self.outstanding.pop((rpy.client_id, rpy.seq_num))
self.completed_reads[req_index] = CompletedRead(causal_state,
rpy.kvpairs)
for i in self.concurrent[req_index].keys():
self.concurrent[i][req_index].result = Result.READ_REPLY
def _count_non_consecutive_blocks(self):
"""
Count the number of missing blocks between self.last_consecutive_block
to self.last_known_block.
"""
count = 0
for i in range(self.last_consecutive_block + 1, self.last_known_block):
if i not in self.blocks:
count += 1
return count
def _get_matching_request(self, rpy):
"""
Return the request that matches rpy along with its index into
self.history.
"""
causal_state = self.outstanding[(rpy.client_id, rpy.seq_num)]
index = causal_state.req_index
return (self.history[index], index)
async def fill_missing_blocks_and_verify(self):
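# Test entry point: fetch the blocks the tracker never saw replies for,
# fill them into the local view, and run the full verification. On failure
# the request history and status are dumped to help debugging.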
try:
# Use a new client, since other clients may not be responsive due to
# past failed responses.
client = await self.skvbc.bft_network.new_client()
last_block_id = await self.get_last_block_id(client)
print(f'Last Block ID = {last_block_id}')
missing_block_ids = self.get_missing_blocks(last_block_id)
print(f'Missing Block IDs = {missing_block_ids}')
blocks = await self.get_blocks(client, missing_block_ids)
self.fill_missing_blocks(blocks)
self.verify()
except Exception as e:
print(f'retries = {client.retries}')
self.status.end_time = time.monotonic()
print("HISTORY...")
for i, entry in enumerate(self.history):
print(f'Index = {i}: {entry}\n')
print("BLOCKS...")
print(f'{self.blocks}\n')
print(str(self.status), flush=True)
print("FAILURE...")
raise(e)
async def get_blocks(self, client, block_ids):
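# Read the kv data of each requested block id from the cluster, retrying
# reads that time out up to the configured number of attempts.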
blocks = {}
for block_id in block_ids:
retries = 12 # 60 seconds
for i in range(0, retries):
try:
msg = kvbc.SimpleKVBCProtocol.get_block_data_req(block_id)
blocks[block_id] = kvbc.SimpleKVBCProtocol.parse_reply(await client.read(msg))
break
except trio.TooSlowError:
if i == retries - 1:
raise
print(f'Retrieved block {block_id}')
return blocks
async def get_last_block_id(self, client):
msg = kvbc.SimpleKVBCProtocol.get_last_block_req()
return kvbc.SimpleKVBCProtocol.parse_reply(await client.read(msg))
async def send_tracked_write(self, client, max_set_size, long_exec=False):
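# Build a random conditional write, record it with the tracker, and send
# it. Replies update the tracked state; timeouts are recorded and the
# request stays outstanding.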
max_read_set_size = 0 if self.no_conflicts else max_set_size
readset = self.readset(0, max_read_set_size)
writeset = self.writeset(max_set_size)
read_version = self.read_block_id()
msg = self.skvbc.write_req(readset, writeset, read_version, long_exec)
seq_num = client.req_seq_num.next()
client_id = client.client_id
self.send_write(
client_id, seq_num, readset, dict(writeset), read_version)
try:
serialized_reply = await client.write(msg, seq_num, pre_process=self.pre_exec_all)
self.status.record_client_reply(client_id)
reply = self.skvbc.parse_reply(serialized_reply)
self.handle_write_reply(client_id, seq_num, reply)
except trio.TooSlowError:
self.status.record_client_timeout(client_id)
return
async def send_tracked_read(self, client, max_set_size):
readset = self.readset(1, max_set_size)
msg = self.skvbc.read_req(readset)
seq_num = client.req_seq_num.next()
client_id = client.client_id
self.send_read(client_id, seq_num, readset)
try:
serialized_reply = await client.read(msg, seq_num)
self.status.record_client_reply(client_id)
reply = self.skvbc.parse_reply(serialized_reply)
self.handle_read_reply(client_id, seq_num, reply)
except trio.TooSlowError:
self.status.record_client_timeout(client_id)
return
def read_block_id(self):
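# Pick a random read version from the last MAX_LOOKBACK known blocks.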
start = max(0, self.last_known_block - MAX_LOOKBACK)
return random.randint(start, self.last_known_block)
def readset(self, min_size, max_size):
return self.skvbc.random_keys(random.randint(min_size, max_size))
def writeset(self, max_size):
writeset_keys = self.skvbc.random_keys(random.randint(0, max_size))
writeset_values = self.skvbc.random_values(len(writeset_keys))
return list(zip(writeset_keys, writeset_values))
async def run_concurrent_ops(self, num_ops, write_weight=.70):
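# Issue num_ops requests, using up to half of the clients concurrently;
# each client is randomly assigned a write or a read according to
# write_weight. Returns the counts of reads and writes started.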
max_concurrency = len(self.bft_network.clients) // 2
max_size = len(self.skvbc.keys) // 2
sent = 0
write_count = 0
read_count = 0
while sent < num_ops:
clients = self.bft_network.random_clients(max_concurrency)
async with trio.open_nursery() as nursery:
for client in clients:
if random.random() < write_weight:
nursery.start_soon(self.send_tracked_write, client, max_size)
write_count += 1
else:
nursery.start_soon(self.send_tracked_read, client, max_size)
read_count += 1
sent += len(clients)
return read_count, write_count
async def send_indefinite_tracked_ops(self, write_weight=.70):
max_size = len(self.skvbc.keys) // 2
while True:
client = self.bft_network.random_client()
async with trio.open_nursery() as nursery:
try:
if random.random() < write_weight:
nursery.start_soon(self.send_tracked_write, client, max_size)
else:
nursery.start_soon(self.send_tracked_read, client, max_size)
except:
pass
await trio.sleep(.01)
async def write_and_track_known_kv(self, kv, client, long_exec=False):
read_version = self.read_block_id()
readset = self.readset(0, 0)
msg = self.skvbc.write_req(readset, kv, read_version, long_exec)
seq_num = client.req_seq_num.next()
client_id = client.client_id
self.send_write(
client_id, seq_num, readset, dict(kv), read_version)
try:
serialized_reply = await client.write(msg, seq_num, pre_process=self.pre_exec_all)
self.status.record_client_reply(client_id)
reply = self.skvbc.parse_reply(serialized_reply)
self.handle_write_reply(client_id, seq_num, reply)
return reply
except trio.TooSlowError:
self.status.record_client_timeout(client_id)
return
async def read_and_track_known_kv(self, key, client):
msg = self.skvbc.read_req([key])
seq_num = client.req_seq_num.next()
client_id = client.client_id
self.send_read(client_id, seq_num, [key])
try:
serialized_reply = await client.read(msg, seq_num)
self.status.record_client_reply(client_id)
reply = self.skvbc.parse_reply(serialized_reply)
self.handle_read_reply(client_id, seq_num, reply)
return reply
except trio.TooSlowError:
self.status.record_client_timeout(client_id)
return
async def tracked_prime_for_state_transfer(
self, stale_nodes,
checkpoints_num=2,
persistency_enabled=True):
initial_nodes = self.bft_network.all_replicas(without=stale_nodes)
[self.bft_network.start_replica(i) for i in initial_nodes]
client = self.bft_network.random_client()
# Write a KV pair with a known value
known_key = self.skvbc.max_key()
known_val = self.skvbc.random_value()
known_kv = [(known_key, known_val)]
await self.write_and_track_known_kv(known_kv, client)
# Fill up the initial nodes with data, checkpoint them and stop
# them. Then bring them back up and ensure the checkpoint data is
# there.
client1 = self.bft_network.random_client()
# Write enough data to checkpoint and create a need for state transfer
for i in range(1 + checkpoints_num * 150):
key = self.skvbc.random_key()
val = self.skvbc.random_value()
kv = [(key, val)]
await self.write_and_track_known_kv(kv, client1)
await self.skvbc.network_wait_for_checkpoint(initial_nodes, checkpoints_num, persistency_enabled)
return client, known_key, known_val, known_kv
async def tracked_read_your_writes(self):
client = self.bft_network.random_client()
keys = self.skvbc._create_keys()
# Verify by "Read your write"
# Perform write with the new primary
last_block = self.skvbc.parse_reply(
await client.read(self.skvbc.get_last_block_req()))
# Perform an unconditional KV put.
# Ensure keys aren't identical
kv = [(keys[0], self.skvbc.random_value()),
(keys[1], self.skvbc.random_value())]
#reply = await client.write(self.write_req([], kv, 0))
#reply = self.parse_reply(reply)
reply = await self.write_and_track_known_kv(kv, client)
assert reply.success
assert last_block + 1 == reply.last_block_id
last_block = reply.last_block_id
# Read the last write and check if equal
# Get the kvpairs in the last written block
data = await client.read(self.skvbc.get_block_data_req(last_block))
kv2 = self.skvbc.parse_reply(data)
assert kv2 == dict(kv)
class PassThroughSkvbcTracker:
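# A drop-in stand-in for the tracker that issues the same kinds of requests
# but performs no history tracking or verification.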
def __init__(self, skvbc=None, bft_network=None, pre_exec_all=False, no_conflicts=False):
self.pre_exec_all = pre_exec_all
self.no_conflicts = no_conflicts
self.skvbc = skvbc
self.bft_network = bft_network
async def send_tracked_write(self, client, max_set_size, long_exec=False):
max_read_set_size = 0 if self.no_conflicts else max_set_size
readset = self.readset(0, max_read_set_size)
writeset = self.writeset(max_set_size)
msg = self.skvbc.write_req(readset, writeset, 0, long_exec)
try:
serialized_reply = await client.write(msg, pre_process=self.pre_exec_all)
reply = self.skvbc.parse_reply(serialized_reply)
return reply
except trio.TooSlowError:
return
async def send_tracked_read(self, client, max_set_size):
readset = self.readset(1, max_set_size)
msg = self.skvbc.read_req(readset)
try:
serialized_reply = await client.read(msg)
reply = self.skvbc.parse_reply(serialized_reply)
return reply
except trio.TooSlowError:
return
def readset(self, min_size, max_size):
return self.skvbc.random_keys(random.randint(min_size, max_size))
def writeset(self, max_size):
writeset_keys = self.skvbc.random_keys(random.randint(0, max_size))
writeset_values = self.skvbc.random_values(len(writeset_keys))
return list(zip(writeset_keys, writeset_values))
async def run_concurrent_ops(self, num_ops, write_weight=.70):
max_concurrency = len(self.bft_network.clients) // 2
max_size = len(self.skvbc.keys) // 2
sent = 0
write_count = 0
read_count = 0
while sent < num_ops:
clients = self.bft_network.random_clients(max_concurrency)
async with trio.open_nursery() as nursery:
for client in clients:
if random.random() < write_weight:
nursery.start_soon(self.send_tracked_write, client, max_size)
write_count += 1
else:
nursery.start_soon(self.send_tracked_read, client, max_size)
read_count += 1
sent += len(clients)
return read_count, write_count
async def send_indefinite_tracked_ops(self, write_weight=.70):
max_size = len(self.skvbc.keys) // 2
while True:
client = self.bft_network.random_client()
try:
if random.random() < write_weight:
await self.send_tracked_write(client, max_size)
else:
await self.send_tracked_read(client, max_size)
except:
pass
await trio.sleep(.1)
async def write_and_track_known_kv(self, kv, client, long_exec=False):
return self.skvbc.parse_reply(await client.write(
self.skvbc.write_req([], kv, 0, long_exec), pre_process=self.pre_exec_all))
async def read_and_track_known_kv(self, key, client):
msg = self.skvbc.read_req([key])
try:
return self.skvbc.parse_reply(await client.read(msg))
except trio.TooSlowError:
return
async def tracked_prime_for_state_transfer(
self, stale_nodes,
checkpoints_num=2,
persistency_enabled=True):
initial_nodes = self.bft_network.all_replicas(without=stale_nodes)
[self.bft_network.start_replica(i) for i in initial_nodes]
client = self.bft_network.random_client()
# Write a KV pair with a known value
known_key = self.skvbc.max_key()
known_val = self.skvbc.random_value()
known_kv = [(known_key, known_val)]
await self.write_and_track_known_kv(known_kv, client)
# Fill up the initial nodes with data, checkpoint them and stop
# them. Then bring them back up and ensure the checkpoint data is
# there.
client1 = self.bft_network.random_client()
# Write enough data to checkpoint and create a need for state transfer
for i in range(1 + checkpoints_num * 150):
key = self.skvbc.random_key()
val = self.skvbc.random_value()
kv = [(key, val)]
await self.write_and_track_known_kv(kv, client1)
await self.skvbc.network_wait_for_checkpoint(initial_nodes, checkpoints_num, persistency_enabled)
return client, known_key, known_val, known_kv
async def tracked_read_your_writes(self):
client = self.bft_network.random_client()
keys = self.skvbc._create_keys()
# Verify by "Read your write"
# Perform write with the new primary
last_block = self.skvbc.parse_reply(
await client.read(self.skvbc.get_last_block_req()))
# Perform an unconditional KV put.
# Ensure keys aren't identical
kv = [(keys[0], self.skvbc.random_value()),
(keys[1], self.skvbc.random_value())]
reply = await self.write_and_track_known_kv(kv, client)
assert reply.success
assert last_block + 1 == reply.last_block_id
last_block = reply.last_block_id
# Read the last write and check if equal
# Get the kvpairs in the last written block
data = await client.read(self.skvbc.get_block_data_req(last_block))
kv2 = self.skvbc.parse_reply(data)
assert kv2 == dict(kv)
|
[
"util.skvbc_exceptions.StaleReadError",
"trio.open_nursery",
"random.randint",
"util.skvbc_exceptions.ConflictingBlockWriteError",
"util.skvbc_exceptions.PhantomBlockError",
"util.skvbc_exceptions.NoConflictError",
"random.random",
"time.monotonic",
"util.skvbc.SimpleKVBCProtocol.get_last_block_req",
"functools.wraps",
"util.skvbc_exceptions.InvalidReadError",
"util.skvbc.SimpleKVBCProtocol.get_block_data_req",
"util.skvbc.SimpleKVBCProtocol",
"trio.sleep"
] |
[((1041, 1056), 'functools.wraps', 'wraps', (['async_fn'], {}), '(async_fn)\n', (1046, 1056), False, 'from functools import wraps\n'), ((2188, 2204), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2202, 2204), False, 'import time\n'), ((2936, 2952), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2950, 2952), False, 'import time\n'), ((3600, 3616), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3614, 3616), False, 'import time\n'), ((4064, 4080), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (4078, 4080), False, 'import time\n'), ((4592, 4608), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (4606, 4608), False, 'import time\n'), ((5154, 5170), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (5168, 5170), False, 'import time\n'), ((8087, 8103), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8101, 8103), False, 'import time\n'), ((8312, 8328), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8326, 8328), False, 'import time\n'), ((36348, 36392), 'util.skvbc.SimpleKVBCProtocol.get_last_block_req', 'kvbc.SimpleKVBCProtocol.get_last_block_req', ([], {}), '()\n', (36390, 36392), True, 'from util import skvbc as kvbc\n'), ((38170, 38214), 'random.randint', 'random.randint', (['start', 'self.last_known_block'], {}), '(start, self.last_known_block)\n', (38184, 38214), False, 'import random\n'), ((38297, 38331), 'random.randint', 'random.randint', (['min_size', 'max_size'], {}), '(min_size, max_size)\n', (38311, 38331), False, 'import random\n'), ((38415, 38442), 'random.randint', 'random.randint', (['(0)', 'max_size'], {}), '(0, max_size)\n', (38429, 38442), False, 'import random\n'), ((45056, 45090), 'random.randint', 'random.randint', (['min_size', 'max_size'], {}), '(min_size, max_size)\n', (45070, 45090), False, 'import random\n'), ((45174, 45201), 'random.randint', 'random.randint', (['(0)', 'max_size'], {}), '(0, max_size)\n', (45188, 45201), False, 'import random\n'), ((1297, 1333), 'util.skvbc.SimpleKVBCProtocol', 'kvbc.SimpleKVBCProtocol', (['bft_network'], {}), '(bft_network)\n', (1320, 1333), True, 'from util import skvbc as kvbc\n'), ((1595, 1631), 'util.skvbc.SimpleKVBCProtocol', 'kvbc.SimpleKVBCProtocol', (['bft_network'], {}), '(bft_network)\n', (1618, 1631), True, 'from util import skvbc as kvbc\n'), ((20027, 20086), 'util.skvbc_exceptions.ConflictingBlockWriteError', 'ConflictingBlockWriteError', (['reply.last_block_id', 'block', 'req'], {}), '(reply.last_block_id, block, req)\n', (20053, 20086), False, 'from util.skvbc_exceptions import ConflictingBlockWriteError, StaleReadError, NoConflictError, InvalidReadError, PhantomBlockError\n'), ((24431, 24500), 'util.skvbc_exceptions.PhantomBlockError', 'PhantomBlockError', (['block_id', 'block_kvpairs', 'matched_blocks', 'unmatched'], {}), '(block_id, block_kvpairs, matched_blocks, unmatched)\n', (24448, 24500), False, 'from util.skvbc_exceptions import ConflictingBlockWriteError, StaleReadError, NoConflictError, InvalidReadError, PhantomBlockError\n'), ((27249, 27290), 'util.skvbc_exceptions.NoConflictError', 'NoConflictError', (['failed_req', 'causal_state'], {}), '(failed_req, causal_state)\n', (27264, 27290), False, 'from util.skvbc_exceptions import ConflictingBlockWriteError, StaleReadError, NoConflictError, InvalidReadError, PhantomBlockError\n'), ((28665, 28725), 'util.skvbc_exceptions.InvalidReadError', 'InvalidReadError', (['completed_read', 'self.concurrent[req_index]'], {}), '(completed_read, self.concurrent[req_index])\n', (28681, 28725), False, 'from 
util.skvbc_exceptions import ConflictingBlockWriteError, StaleReadError, NoConflictError, InvalidReadError, PhantomBlockError\n'), ((32450, 32504), 'util.skvbc_exceptions.StaleReadError', 'StaleReadError', (['req.read_block_id', 'i', 'written_block_id'], {}), '(req.read_block_id, i, written_block_id)\n', (32464, 32504), False, 'from util.skvbc_exceptions import ConflictingBlockWriteError, StaleReadError, NoConflictError, InvalidReadError, PhantomBlockError\n'), ((35363, 35379), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (35377, 35379), False, 'import time\n'), ((38934, 38953), 'trio.open_nursery', 'trio.open_nursery', ([], {}), '()\n', (38951, 38953), False, 'import trio\n'), ((39620, 39639), 'trio.open_nursery', 'trio.open_nursery', ([], {}), '()\n', (39637, 39639), False, 'import trio\n'), ((45693, 45712), 'trio.open_nursery', 'trio.open_nursery', ([], {}), '()\n', (45710, 45712), False, 'import trio\n'), ((46640, 46655), 'trio.sleep', 'trio.sleep', (['(0.1)'], {}), '(0.1)\n', (46650, 46655), False, 'import trio\n'), ((35924, 35976), 'util.skvbc.SimpleKVBCProtocol.get_block_data_req', 'kvbc.SimpleKVBCProtocol.get_block_data_req', (['block_id'], {}), '(block_id)\n', (35966, 35976), True, 'from util import skvbc as kvbc\n'), ((39997, 40013), 'trio.sleep', 'trio.sleep', (['(0.01)'], {}), '(0.01)\n', (40007, 40013), False, 'import trio\n'), ((46392, 46407), 'random.random', 'random.random', ([], {}), '()\n', (46405, 46407), False, 'import random\n'), ((39028, 39043), 'random.random', 'random.random', ([], {}), '()\n', (39041, 39043), False, 'import random\n'), ((39696, 39711), 'random.random', 'random.random', ([], {}), '()\n', (39709, 39711), False, 'import random\n'), ((45787, 45802), 'random.random', 'random.random', ([], {}), '()\n', (45800, 45802), False, 'import random\n')]
|
from ray import Ray
from vec3 import Vec3
import math
from random import random
def random_in_unit_disk():
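# Rejection sampling: draw points in the square [-1, 1)^2 until one lands
# inside the unit disk.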
while True:
p = 2.0 * Vec3(random(),random(),0) -Vec3(1,1,0)
if Vec3.dot(p,p) < 1.0:
return p
class Camera:
def __init__(self,lookfrom,lookat,vup,vfov,aspect,aperture,focus_dist,t0,t1):
self.time0 = t0
self.time1 = t1
self.lens_radius = aperture /2
theta = vfov * math.pi/180
half_height = math.tan(theta/2)
half_width = aspect * half_height
self.origin = lookfrom
self.w = Vec3.unit_vector(lookfrom -lookat)
self.u = Vec3.unit_vector(Vec3.cross(vup,self.w))
self.v = Vec3.cross(self.w,self.u)
self.lower_left_corner = self.origin - half_width*focus_dist*self.u -half_height*focus_dist*self.v -focus_dist*self.w
self.horizontal = 2*half_width*focus_dist*self.u
self.vertical = 2*half_height*focus_dist*self.v
def get_ray(self,s,t):
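# Depth of field: offset the ray origin by a random point on the lens
# aperture. Motion blur: sample the ray time uniformly between the shutter
# open (time0) and close (time1) times.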
rd = self.lens_radius * random_in_unit_disk()
offset = self.u*rd.x + self.v*rd.y
time = self.time0 + random()*(self.time1 - self.time0)
return Ray(self.origin + offset,self.lower_left_corner + s*self.horizontal + t*self.vertical -self.origin -offset,time)
|
[
"vec3.Vec3.cross",
"math.tan",
"random.random",
"ray.Ray",
"vec3.Vec3.dot",
"vec3.Vec3.unit_vector",
"vec3.Vec3"
] |
[((437, 456), 'math.tan', 'math.tan', (['(theta / 2)'], {}), '(theta / 2)\n', (445, 456), False, 'import math\n'), ((533, 568), 'vec3.Vec3.unit_vector', 'Vec3.unit_vector', (['(lookfrom - lookat)'], {}), '(lookfrom - lookat)\n', (549, 568), False, 'from vec3 import Vec3\n'), ((635, 661), 'vec3.Vec3.cross', 'Vec3.cross', (['self.w', 'self.u'], {}), '(self.w, self.u)\n', (645, 661), False, 'from vec3 import Vec3\n'), ((1073, 1197), 'ray.Ray', 'Ray', (['(self.origin + offset)', '(self.lower_left_corner + s * self.horizontal + t * self.vertical - self.\n origin - offset)', 'time'], {}), '(self.origin + offset, self.lower_left_corner + s * self.horizontal + t *\n self.vertical - self.origin - offset, time)\n', (1076, 1197), False, 'from ray import Ray\n'), ((163, 176), 'vec3.Vec3', 'Vec3', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (167, 176), False, 'from vec3 import Vec3\n'), ((182, 196), 'vec3.Vec3.dot', 'Vec3.dot', (['p', 'p'], {}), '(p, p)\n', (190, 196), False, 'from vec3 import Vec3\n'), ((598, 621), 'vec3.Vec3.cross', 'Vec3.cross', (['vup', 'self.w'], {}), '(vup, self.w)\n', (608, 621), False, 'from vec3 import Vec3\n'), ((1027, 1035), 'random.random', 'random', ([], {}), '()\n', (1033, 1035), False, 'from random import random\n'), ((141, 149), 'random.random', 'random', ([], {}), '()\n', (147, 149), False, 'from random import random\n'), ((150, 158), 'random.random', 'random', ([], {}), '()\n', (156, 158), False, 'from random import random\n')]
|
# coding: utf-8
import os
import sys
sys.path.append(os.pardir)  # Setting to allow importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# Reduce the amount of training data in order to reproduce overfitting
x_train = x_train[:300]
t_train = t_train[:300]
# Weight decay setting =======================
#weight_decay_lambda = 0 # case without weight decay
weight_decay_lambda = 0.1
# ====================================================
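# weight_decay_lambda sets the strength of L2 regularization; larger values
# penalize large weights and suppress overfitting.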
network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100], output_size=10,
weight_decay_lambda=weight_decay_lambda)
optimizer = SGD(lr=0.01)
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
epoch_cnt = 0
for i in range(1000000000):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
grads = network.gradient(x_batch, t_batch)
optimizer.update(network.params, grads)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
# print("epoch:" + str(epoch_cnt) + ", train acc:" + str(train_acc) + ", test acc:" + str(test_acc))
epoch_cnt += 1
if epoch_cnt >= max_epochs:
break
# 3. Draw the graph ==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
|
[
"sys.path.append",
"numpy.random.choice",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"common.optimizer.SGD",
"matplotlib.pyplot.legend",
"dataset.mnist.load_mnist",
"numpy.arange",
"common.multi_layer_net.MultiLayerNet",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((38, 64), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (53, 64), False, 'import sys\n'), ((295, 321), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)'}), '(normalize=True)\n', (305, 321), False, 'from dataset.mnist import load_mnist\n'), ((567, 707), 'common.multi_layer_net.MultiLayerNet', 'MultiLayerNet', ([], {'input_size': '(784)', 'hidden_size_list': '[100, 100, 100, 100, 100, 100]', 'output_size': '(10)', 'weight_decay_lambda': 'weight_decay_lambda'}), '(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, \n 100], output_size=10, weight_decay_lambda=weight_decay_lambda)\n', (580, 707), False, 'from common.multi_layer_net import MultiLayerNet\n'), ((739, 751), 'common.optimizer.SGD', 'SGD', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (742, 751), False, 'from common.optimizer import SGD\n'), ((1659, 1680), 'numpy.arange', 'np.arange', (['max_epochs'], {}), '(max_epochs)\n', (1668, 1680), True, 'import numpy as np\n'), ((1681, 1749), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_acc_list'], {'marker': '"""o"""', 'label': '"""train"""', 'markevery': '(10)'}), "(x, train_acc_list, marker='o', label='train', markevery=10)\n", (1689, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1750, 1816), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'test_acc_list'], {'marker': '"""s"""', 'label': '"""test"""', 'markevery': '(10)'}), "(x, test_acc_list, marker='s', label='test', markevery=10)\n", (1758, 1816), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (1827, 1837), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1860), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (1848, 1860), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1877), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (1869, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1907), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1888, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1916, 1918), True, 'import matplotlib.pyplot as plt\n'), ((988, 1028), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {}), '(train_size, batch_size)\n', (1004, 1028), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Tests for classes representing custom host collections."""
from __future__ import unicode_literals
from dns import name
from ipaddress import ip_address
from nose_parameterized import parameterized
from spam_lists.exceptions import InvalidHostError
from spam_lists.host_collections import HostCollection, SortedHostCollection
from test.compat import unittest, Mock
from test.unit.common_definitions import (
TestFunctionDoesNotHandleMixin, host_list_host_factory, HostListTestMixin
)
def get_sorting_key(value):
"""Get a key for sorting host values during tests.
:param value: a host value for which we generate key
:returns: a value used as the sorting key
"""
try:
return ip_address(value)
except ValueError:
return name.from_text(value)
def has_to_unicode(value):
"""Check if an object has a to_unicode attribute.
:param value: a value for which we test membership of the attribute
:returns: True if the value has the attribute
"""
return hasattr(value, 'to_unicode')
def host_collection_host_factory(host):
"""Get a mock of a host object stored in a collection.
:param host: a host value to be represented by the mock
:returns: an instance of Mock representing a host object stored
in a host collection
"""
host_object = host_list_host_factory(host)
_str = host_object.to_unicode()
def test(other):
"""Test if the other and the host object match each other.
:param other: an object to be compared
:returns: result of the test
"""
return (has_to_unicode(other) and
other.to_unicode() in _str)
host_object.is_match.side_effect = test
host_object.is_subdomain.side_effect = test
def less_than(other):
"""Check if the host object key is less than the other.
This function is an implementation of __lt__ expected from host
objects by bisect_right function.
:param other: a value to be compared
:returns: result of the comparison
"""
host_object_key = get_sorting_key(_str)
other_value = other.to_unicode() if has_to_unicode(other) else other
other_key = get_sorting_key(other_value)
try:
result = host_object_key < other_key
except TypeError:
result = _str < other_value
return result
host_object.__lt__.side_effect = less_than
return host_object
class HostCollectionBaseTest(
HostListTestMixin,
TestFunctionDoesNotHandleMixin,
):
"""Tests for subclasses or BaseHostCollection.
:ivar host_factory_mock: a mocked implementation of host factory
used by tested instance. Uses host_collection_host_factory
as its implementation.
:ivar tested_instance: an instance of tested class
"""
# pylint: disable=too-many-public-methods
valid_urls = ['http://test.com', 'http://127.33.22.11']
def setUp(self):
self.host_factory_mock = Mock()
self.host_factory_mock.side_effect = host_collection_host_factory
self.tested_instance = self.constructor(
'test_host_collection',
self.classification,
host_factory=self.host_factory_mock
)
def test_add_invalid_host(self):
"""Test for InvalidHostError when adding an invalid host.
An invalid host is defined as a value that doesn't match a type
of host value accepted by a collection.
"""
function = self.tested_instance.add
self._test_function_does_not_handle(
InvalidHostError,
self.host_factory_mock,
function,
'invalidhost.com'
)
@parameterized.expand(HostListTestMixin.valid_host_input)
def test_add_for_valid(self, _, value):
"""Test the method for a valid host value.
:param value: a host value to be added
"""
self.tested_instance.add(value)
self.assertTrue(value in self.tested_instance.hosts)
def test_add_for_subdomain(self):
"""Test the method for a subdomain of a listed domain.
A subdomain to a domain already listed in the collection is
expected not to be added to the collection.
"""
initial_hosts = ['domain.com']
self._set_matching_hosts(initial_hosts)
self.tested_instance.add('subdomain.domain.com')
self.assertCountEqual(initial_hosts, self.tested_instance.hosts)
def test_add_for_the_same_value(self):
"""Test the method for a listed value.
An already listed value is expected not to be added to
the collection
"""
value = 'domain.com'
initial_hosts = ['host.com', value]
self._set_matching_hosts(initial_hosts)
self.tested_instance.add(value)
self.assertCountEqual(initial_hosts, self.tested_instance.hosts)
def test_add_a_superdomain(self):
"""Test the method for a superdomain of a listed domain.
A superdomain of a domain listed in the collection is expected
to replace its subdomain when added.
"""
superdomain = 'domain.com'
subdomain = 'sub.domain.com'
initial_hosts = ['host1.com', subdomain]
self._set_matching_hosts(initial_hosts)
self.tested_instance.add(superdomain)
initial_hosts.remove(subdomain)
initial_hosts.append(superdomain)
self.assertCountEqual(initial_hosts, self.tested_instance.hosts)
def _set_matching_hosts(self, hosts):
self.tested_instance.hosts = list(hosts)
class HostCollectionTest(HostCollectionBaseTest, unittest.TestCase):
"""Tests for HostCollection class."""
constructor = HostCollection
class SortedHostCollectionTest(HostCollectionBaseTest, unittest.TestCase):
"""Tests for SortedHostCollection class."""
constructor = SortedHostCollection
def _set_matching_hosts(self, hosts):
self.tested_instance.hosts = list(hosts)
self.tested_instance.hosts.sort(key=self.host_factory_mock)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"nose_parameterized.parameterized.expand",
"test.compat.unittest.main",
"test.compat.Mock",
"dns.name.from_text",
"ipaddress.ip_address",
"test.unit.common_definitions.host_list_host_factory"
] |
[((1352, 1380), 'test.unit.common_definitions.host_list_host_factory', 'host_list_host_factory', (['host'], {}), '(host)\n', (1374, 1380), False, 'from test.unit.common_definitions import TestFunctionDoesNotHandleMixin, host_list_host_factory, HostListTestMixin\n'), ((3739, 3795), 'nose_parameterized.parameterized.expand', 'parameterized.expand', (['HostListTestMixin.valid_host_input'], {}), '(HostListTestMixin.valid_host_input)\n', (3759, 3795), False, 'from nose_parameterized import parameterized\n'), ((6178, 6193), 'test.compat.unittest.main', 'unittest.main', ([], {}), '()\n', (6191, 6193), False, 'from test.compat import unittest, Mock\n'), ((739, 756), 'ipaddress.ip_address', 'ip_address', (['value'], {}), '(value)\n', (749, 756), False, 'from ipaddress import ip_address\n'), ((3022, 3028), 'test.compat.Mock', 'Mock', ([], {}), '()\n', (3026, 3028), False, 'from test.compat import unittest, Mock\n'), ((795, 816), 'dns.name.from_text', 'name.from_text', (['value'], {}), '(value)\n', (809, 816), False, 'from dns import name\n')]
|
import io
import os
import subprocess
from datetime import datetime
from urllib.parse import urlparse
from rastervision.filesystem import (FileSystem, NotReadableError,
NotWritableError)
# Code from https://alexwlchan.net/2017/07/listing-s3-keys/
def get_matching_s3_objects(bucket, prefix='', suffix=''):
"""
Generate objects in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch objects whose key starts with
this prefix (optional).
:param suffix: Only fetch objects whose keys end with
this suffix (optional).
"""
import boto3
s3 = boto3.client('s3')
kwargs = {'Bucket': bucket}
# If the prefix is a single string (not a tuple of strings), we can
# do the filtering directly in the S3 API.
if isinstance(prefix, str):
kwargs['Prefix'] = prefix
while True:
# The S3 API response is a large blob of metadata.
# 'Contents' contains information about the listed objects.
resp = s3.list_objects_v2(**kwargs)
try:
contents = resp['Contents']
except KeyError:
return
for obj in contents:
key = obj['Key']
if key.startswith(prefix) and key.endswith(suffix):
yield obj
# The S3 API is paginated, returning up to 1000 keys at a time.
# Pass the continuation token into the next response, until we
# reach the final page (when this field is missing).
try:
kwargs['ContinuationToken'] = resp['NextContinuationToken']
except KeyError:
break
def get_matching_s3_keys(bucket, prefix='', suffix=''):
"""
Generate the keys in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch keys that start with this prefix (optional).
:param suffix: Only fetch keys that end with this suffix (optional).
"""
for obj in get_matching_s3_objects(bucket, prefix, suffix):
yield obj['Key']
class S3FileSystem(FileSystem):
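# FileSystem backend for s3:// URIs. boto3/botocore are imported lazily
# inside the methods so they are only required when S3 is actually used.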
@staticmethod
def get_session():
# Lazily load boto
import boto3
return boto3.Session()
@staticmethod
def matches_uri(uri: str, mode: str) -> bool:
parsed_uri = urlparse(uri)
return parsed_uri.scheme == 's3'
@staticmethod
def file_exists(uri: str) -> bool:
# Lazily load boto
import botocore
s3 = S3FileSystem.get_session().resource('s3')
parsed_uri = urlparse(uri)
bucket = parsed_uri.netloc
key = parsed_uri.path[1:]
try:
s3.Object(bucket, key).load()
except botocore.exceptions.ClientError as e:
return False
return True
@staticmethod
def read_str(uri: str) -> str:
return S3FileSystem.read_bytes(uri).decode('utf-8')
@staticmethod
def read_bytes(uri: str) -> bytes:
import botocore
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(uri)
with io.BytesIO() as file_buffer:
try:
s3.download_fileobj(parsed_uri.netloc, parsed_uri.path[1:],
file_buffer)
return file_buffer.getvalue()
except botocore.exceptions.ClientError as e:
raise NotReadableError('Could not read {}'.format(uri)) from e
@staticmethod
def write_str(uri: str, data: str) -> None:
data = bytes(data, encoding='utf-8')
S3FileSystem.write_bytes(uri, data)
@staticmethod
def write_bytes(uri: str, data: bytes) -> None:
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(uri)
bucket = parsed_uri.netloc
key = parsed_uri.path[1:]
with io.BytesIO(data) as str_buffer:
try:
s3.upload_fileobj(str_buffer, bucket, key)
except Exception as e:
raise NotWritableError('Could not write {}'.format(uri)) from e
@staticmethod
def sync_from_dir(src_dir_uri: str,
dest_dir_uri: str,
delete: bool = False) -> None: # pragma: no cover
command = ['aws', 's3', 'sync', src_dir_uri, dest_dir_uri]
if delete:
command.append('--delete')
subprocess.run(command)
@staticmethod
def sync_to_dir(src_dir_uri: str, dest_dir_uri: str,
delete: bool = False) -> None: # pragma: no cover
command = ['aws', 's3', 'sync', src_dir_uri, dest_dir_uri]
if delete:
command.append('--delete')
subprocess.run(command)
@staticmethod
def copy_to(src_path: str, dst_uri: str) -> None:
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(dst_uri)
if os.path.isfile(src_path):
try:
s3.upload_file(src_path, parsed_uri.netloc,
parsed_uri.path[1:])
except Exception as e:
raise NotWritableError(
'Could not write {}'.format(dst_uri)) from e
else:
S3FileSystem.sync_to_dir(src_path, dst_uri, delete=True)
@staticmethod
def copy_from(uri: str, path: str) -> None:
import botocore
s3 = S3FileSystem.get_session().client('s3')
parsed_uri = urlparse(uri)
try:
s3.download_file(parsed_uri.netloc, parsed_uri.path[1:], path)
except botocore.exceptions.ClientError:
raise NotReadableError('Could not read {}'.format(uri))
@staticmethod
def local_path(uri: str, download_dir: str) -> None:
parsed_uri = urlparse(uri)
path = os.path.join(download_dir, 's3', parsed_uri.netloc,
parsed_uri.path[1:])
return path
@staticmethod
def last_modified(uri: str) -> datetime:
parsed_uri = urlparse(uri)
bucket, key = parsed_uri.netloc, parsed_uri.path[1:]
s3 = S3FileSystem.get_session().client('s3')
head_data = s3.head_object(Bucket=bucket, Key=key)
return head_data['LastModified']
@staticmethod
def list_paths(uri, ext=''):
parsed_uri = urlparse(uri)
bucket = parsed_uri.netloc
prefix = os.path.join(parsed_uri.path[1:])
keys = get_matching_s3_keys(bucket, prefix, suffix=ext)
return [os.path.join('s3://', bucket, key) for key in keys]
|
[
"subprocess.run",
"io.BytesIO",
"boto3.Session",
"boto3.client",
"os.path.isfile",
"os.path.join",
"urllib.parse.urlparse"
] |
[((651, 669), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (663, 669), False, 'import boto3\n'), ((2188, 2203), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (2201, 2203), False, 'import boto3\n'), ((2294, 2307), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (2302, 2307), False, 'from urllib.parse import urlparse\n'), ((2535, 2548), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (2543, 2548), False, 'from urllib.parse import urlparse\n'), ((3043, 3056), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (3051, 3056), False, 'from urllib.parse import urlparse\n'), ((3725, 3738), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (3733, 3738), False, 'from urllib.parse import urlparse\n'), ((4350, 4373), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (4364, 4373), False, 'import subprocess\n'), ((4654, 4677), 'subprocess.run', 'subprocess.run', (['command'], {}), '(command)\n', (4668, 4677), False, 'import subprocess\n'), ((4826, 4843), 'urllib.parse.urlparse', 'urlparse', (['dst_uri'], {}), '(dst_uri)\n', (4834, 4843), False, 'from urllib.parse import urlparse\n'), ((4855, 4879), 'os.path.isfile', 'os.path.isfile', (['src_path'], {}), '(src_path)\n', (4869, 4879), False, 'import os\n'), ((5400, 5413), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (5408, 5413), False, 'from urllib.parse import urlparse\n'), ((5715, 5728), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (5723, 5728), False, 'from urllib.parse import urlparse\n'), ((5744, 5816), 'os.path.join', 'os.path.join', (['download_dir', '"""s3"""', 'parsed_uri.netloc', 'parsed_uri.path[1:]'], {}), "(download_dir, 's3', parsed_uri.netloc, parsed_uri.path[1:])\n", (5756, 5816), False, 'import os\n'), ((5950, 5963), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (5958, 5963), False, 'from urllib.parse import urlparse\n'), ((6251, 6264), 'urllib.parse.urlparse', 'urlparse', (['uri'], {}), '(uri)\n', (6259, 6264), False, 'from urllib.parse import urlparse\n'), ((6317, 6350), 'os.path.join', 'os.path.join', (['parsed_uri.path[1:]'], {}), '(parsed_uri.path[1:])\n', (6329, 6350), False, 'import os\n'), ((3070, 3082), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3080, 3082), False, 'import io\n'), ((3821, 3837), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (3831, 3837), False, 'import io\n'), ((6431, 6465), 'os.path.join', 'os.path.join', (['"""s3://"""', 'bucket', 'key'], {}), "('s3://', bucket, key)\n", (6443, 6465), False, 'import os\n')]
|
#coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from paddlehub.common import utils
from paddlehub.common.downloader import default_downloader
from paddlehub.common.hub_server import default_hub_server
from paddlehub.commands.base_command import BaseCommand, ENTRY
class DownloadCommand(BaseCommand):
name = "download"
def __init__(self, name):
super(DownloadCommand, self).__init__(name)
self.show_in_help = True
self.description = "Download PaddlePaddle pretrained model/module files."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__,
prog='%s %s <model_name/module_name>' % (ENTRY, name),
usage='%(prog)s [options]',
add_help=False)
# yapf: disable
self.add_arg("--type", str, "All", "choice: Module/Model/All")
self.add_arg('--output_path', str, ".", "path to save the model/module" )
self.add_arg('--uncompress', bool, False, "uncompress the download package or not" )
# yapf: enable
def execute(self, argv):
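# Resolve the requested module/model on the hub server, skip the download
# if a local copy already passes the MD5 check, otherwise download the
# package and optionally uncompress it.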
if not argv:
print("ERROR: Please provide the model/module name\n")
self.help()
return False
mod_name = argv[0]
mod_version = None if "==" not in mod_name else mod_name.split("==")[1]
mod_name = mod_name if "==" not in mod_name else mod_name.split("==")[0]
self.args = self.parser.parse_args(argv[1:])
self.args.type = self.check_type(self.args.type)
extra = {"command": "download"}
if self.args.type in ["Module", "Model"]:
search_result = default_hub_server.get_resource_url(
mod_name,
resource_type=self.args.type,
version=mod_version,
extra=extra)
else:
search_result = default_hub_server.get_resource_url(
mod_name,
resource_type="Module",
version=mod_version,
extra=extra)
self.args.type = "Module"
if search_result == {}:
search_result = default_hub_server.get_resource_url(
mod_name,
resource_type="Model",
version=mod_version,
extra=extra)
self.args.type = "Model"
url = search_result.get('url', None)
except_md5_value = search_result.get('md5', None)
if not url:
if default_hub_server._server_check() is False:
tips = "Request Hub-Server unsuccessfully, please check your network."
else:
tips = "PaddleHub can't find model/module named %s" % mod_name
if mod_version:
tips += " with version %s" % mod_version
tips += ". Please use the 'hub search' command to find the correct model/module name."
print(tips)
return True
need_to_download_file = True
file_name = os.path.basename(url)
file = os.path.join(self.args.output_path, file_name)
if os.path.exists(file):
print("File %s already existed\nWait to check the MD5 value" %
file_name)
file_md5_value = utils.md5_of_file(file)
if except_md5_value == file_md5_value:
print("MD5 check pass.")
need_to_download_file = False
else:
print("MD5 check failed!\nDelete invalid file.")
os.remove(file)
if need_to_download_file:
result, tips, file = default_downloader.download_file(
url=url, save_path=self.args.output_path, print_progress=True)
if not result:
print(tips)
return False
if self.args.uncompress:
result, tips, file = default_downloader.uncompress(
file=file,
dirname=self.args.output_path,
delete_file=True,
print_progress=True)
print(tips)
if self.args.type == "Model":
os.rename(file, "./" + mod_name)
return True
def check_type(self, mod_type):
mod_type = mod_type.lower()
if mod_type == "module":
mod_type = "Module"
elif mod_type == "model":
mod_type = "Model"
else:
mod_type = "All"
return mod_type
command = DownloadCommand.instance()
|
[
"paddlehub.common.downloader.default_downloader.uncompress",
"paddlehub.common.downloader.default_downloader.download_file",
"os.remove",
"argparse.ArgumentParser",
"paddlehub.common.hub_server.default_hub_server.get_resource_url",
"os.path.basename",
"os.rename",
"os.path.exists",
"paddlehub.common.utils.md5_of_file",
"paddlehub.common.hub_server.default_hub_server._server_check",
"os.path.join"
] |
[((1271, 1439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'self.__class__.__doc__', 'prog': "('%s %s <model_name/module_name>' % (ENTRY, name))", 'usage': '"""%(prog)s [options]"""', 'add_help': '(False)'}), "(description=self.__class__.__doc__, prog=\n '%s %s <model_name/module_name>' % (ENTRY, name), usage=\n '%(prog)s [options]', add_help=False)\n", (1294, 1439), False, 'import argparse\n'), ((3758, 3779), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (3774, 3779), False, 'import os\n'), ((3795, 3841), 'os.path.join', 'os.path.join', (['self.args.output_path', 'file_name'], {}), '(self.args.output_path, file_name)\n', (3807, 3841), False, 'import os\n'), ((3853, 3873), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (3867, 3873), False, 'import os\n'), ((2372, 2485), 'paddlehub.common.hub_server.default_hub_server.get_resource_url', 'default_hub_server.get_resource_url', (['mod_name'], {'resource_type': 'self.args.type', 'version': 'mod_version', 'extra': 'extra'}), '(mod_name, resource_type=self.args.type,\n version=mod_version, extra=extra)\n', (2407, 2485), False, 'from paddlehub.common.hub_server import default_hub_server\n'), ((2589, 2696), 'paddlehub.common.hub_server.default_hub_server.get_resource_url', 'default_hub_server.get_resource_url', (['mod_name'], {'resource_type': '"""Module"""', 'version': 'mod_version', 'extra': 'extra'}), "(mod_name, resource_type='Module',\n version=mod_version, extra=extra)\n", (2624, 2696), False, 'from paddlehub.common.hub_server import default_hub_server\n'), ((4008, 4031), 'paddlehub.common.utils.md5_of_file', 'utils.md5_of_file', (['file'], {}), '(file)\n', (4025, 4031), False, 'from paddlehub.common import utils\n'), ((4353, 4452), 'paddlehub.common.downloader.default_downloader.download_file', 'default_downloader.download_file', ([], {'url': 'url', 'save_path': 'self.args.output_path', 'print_progress': '(True)'}), '(url=url, save_path=self.args.output_path,\n print_progress=True)\n', (4385, 4452), False, 'from paddlehub.common.downloader import default_downloader\n'), ((4617, 4731), 'paddlehub.common.downloader.default_downloader.uncompress', 'default_downloader.uncompress', ([], {'file': 'file', 'dirname': 'self.args.output_path', 'delete_file': '(True)', 'print_progress': '(True)'}), '(file=file, dirname=self.args.output_path,\n delete_file=True, print_progress=True)\n', (4646, 4731), False, 'from paddlehub.common.downloader import default_downloader\n'), ((2864, 2970), 'paddlehub.common.hub_server.default_hub_server.get_resource_url', 'default_hub_server.get_resource_url', (['mod_name'], {'resource_type': '"""Model"""', 'version': 'mod_version', 'extra': 'extra'}), "(mod_name, resource_type='Model',\n version=mod_version, extra=extra)\n", (2899, 2970), False, 'from paddlehub.common.hub_server import default_hub_server\n'), ((3227, 3261), 'paddlehub.common.hub_server.default_hub_server._server_check', 'default_hub_server._server_check', ([], {}), '()\n', (3259, 3261), False, 'from paddlehub.common.hub_server import default_hub_server\n'), ((4269, 4284), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (4278, 4284), False, 'import os\n'), ((4875, 4907), 'os.rename', 'os.rename', (['file', "('./' + mod_name)"], {}), "(file, './' + mod_name)\n", (4884, 4907), False, 'import os\n')]
|
from infrastructure.models import CustomField, Environment
from resourcehandlers.aws.models import AWSHandler
from common.methods import set_progress
import boto3
import time
def connect_to_elasticache(env):
"""
Return boto connection to the elasticache in the specified environment's region.
"""
rh = env.resource_handler.cast()
return (rh.id, boto3.client(
'elasticache',
region_name=env.aws_region,
aws_access_key_id=rh.serviceaccount,
aws_secret_access_key=rh.servicepasswd))
def create_custom_fields_as_needed():
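    # Idempotently create the CustomField records this blueprint stores on the provisioned resource.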
CustomField.objects.get_or_create(
name='aws_rh_id', defaults={
'label': 'AWS RH ID', 'type': 'STR',
'description': 'Used by the AWS blueprints'
}
)
CustomField.objects.get_or_create(
name='env_id', defaults={
'label': 'Environment ID', 'type': 'STR',
'description': 'Used by the AWS blueprints'
}
)
CustomField.objects.get_or_create(
name='name', defaults={
'label': 'AWS ElastiCache', 'type': 'STR',
'description': 'Used by the AWS blueprints'
}
)
CustomField.objects.get_or_create(
name='cluster_name', defaults={
'label': 'AWS ElastiCache cluster_name', 'type': 'STR'
}
)
CustomField.objects.get_or_create(
name='engine', defaults={
'label': 'AWS ElastiCache engine', 'type': 'STR',
'description': 'The name of the cache engine to be used for this cluster'
}
)
def generate_options_for_aws_environment(profile=None, **kwargs):
envs_this_user_can_view = Environment.objects_for_profile(profile)
aws_handlers = AWSHandler.objects.all()
aws_envs = envs_this_user_can_view.filter(
resource_handler_id__in=aws_handlers)
return [(env.id, env.name) for env in aws_envs]
def generate_options_for_engine(**kwargs):
return ['memcached', 'redis']
def generate_options_for_cache_node_type(**kwargs):
return [
('cache.t2.micro', 'T2 node - cache.t2.micro'),
('cache.t2.small', 'T2 node - cache.t2.small'),
('cache.t2.medium', 'T2 node - cache.t2.medium'),
('cache.m3.medium', 'M3 node - cache.m3.medium'),
('cache.m3.large', 'M3 node - cache.m3.large'),
('cache.m3.xlarge', 'M3 node - cache.m3.xlarge'),
('cache.m4.xlarge', 'M4 node - cache.m4.xlarge'),
('cache.m4.2xlarge', 'M4 node - cache.m4.2xlarge'),
('cache.m4.4xlarge', 'M4 node - cache.m4.4xlarge'),
('cache.m4.10xlarge', 'M4 node - cache.m4.10xlarge'),
('cache.t1.micro', 'T1 node(not recommended) - cache.t1.micro'),
('cache.m1.small', 'T1 node(not recommended) - cache.m1.small'),
('cache.m1.medium', 'T1 node(not recommended) - cache.m1.medium'),
('cache.m1.large', 'T1 node(not recommended) - cache.m1.large'),
('cache.m1.xlarge', 'T1 node(not recommended) - cache.m1.xlarge'),
('cache.c1.xlarge', 'C1 node(not recommended) - cache.c1.xlarge'),
('cache.r3.large', 'R3 node - cache.r3.large'),
('cache.r3.xlarge', 'R3 node - cache.r3.xlarge'),
('cache.r3.2xlarge', 'R3 node - cache.r3.2xlarge'),
('cache.r3.4xlarge', 'R3 node - cache.r3.4xlarge'),
('cache.r3.8xlarge', 'R3 node - cache.r3.8xlarge'),
('cache.r4.large', 'R4 node - cache.r4.large'),
('cache.r4.xlarge', 'R4 node - cache.r4.xlarge'),
('cache.r4.2xlarge', 'R4 node - cache.r4.2xlarge'),
('cache.r4.4xlarge', 'R4 node - cache.r4.4xlarge'),
('cache.r4.8xlarge', 'R4 node - cache.r4.8xlarge'),
('cache.r4.16xlarge', 'R4 node - cache.r4.16xlarge'),
('cache.m2.xlarge', 'M2 node - cache.m2.xlarge'),
('cache.m2.2xlarge', 'M2 node - cache.m2.2xlarge'),
('cache.m2.4xlarge', 'M2 node - cache.m2.4xlarge'),
]
def run(resource, *args, **kwargs):
create_custom_fields_as_needed()
cluster_name = "{{ cluster_name }}"
engine = "{{ engine }}"
CacheNodeType = "{{ cache_node_type }}"
env = Environment.objects.get(id='{{ aws_environment }}')
NumCacheNodes = "{{ num_cache_nodes }}"
if engine == 'redis':
NumCacheNodes = 1
rh_id, client = connect_to_elasticache(env)
try:
client.create_cache_cluster(
CacheClusterId=cluster_name,
Engine=engine,
CacheNodeType=CacheNodeType,
NumCacheNodes=int(NumCacheNodes),
)
waiter = client.get_waiter('cache_cluster_available')
waiter.wait(
CacheClusterId=cluster_name
)
except Exception as error:
return "FAILURE", "", f"{error}"
while True:
response = client.describe_cache_clusters(
CacheClusterId=cluster_name)
cache_instances = response['CacheClusters']
if len(cache_instances) != 1:
raise RuntimeError(
"Multiple caches with thi name {0} identified. ".format(cluster_name))
cache_instance = cache_instances[0]
status = cache_instance['CacheClusterStatus']
set_progress('Status of the cluster is: %s' % status)
if status == 'available':
set_progress('Cluster is now available on host')
break
time.sleep(5)
resource.name = cluster_name
resource.cluster_name = cluster_name
resource.engine = engine
resource.aws_rh_id = rh_id
resource.env_id = env.id
resource.save()
return "SUCCESS", "", ""
|
[
"resourcehandlers.aws.models.AWSHandler.objects.all",
"boto3.client",
"time.sleep",
"infrastructure.models.Environment.objects.get",
"infrastructure.models.Environment.objects_for_profile",
"common.methods.set_progress",
"infrastructure.models.CustomField.objects.get_or_create"
] |
[((578, 726), 'infrastructure.models.CustomField.objects.get_or_create', 'CustomField.objects.get_or_create', ([], {'name': '"""aws_rh_id"""', 'defaults': "{'label': 'AWS RH ID', 'type': 'STR', 'description':\n 'Used by the AWS blueprints'}"}), "(name='aws_rh_id', defaults={'label':\n 'AWS RH ID', 'type': 'STR', 'description': 'Used by the AWS blueprints'})\n", (611, 726), False, 'from infrastructure.models import CustomField, Environment\n'), ((775, 929), 'infrastructure.models.CustomField.objects.get_or_create', 'CustomField.objects.get_or_create', ([], {'name': '"""env_id"""', 'defaults': "{'label': 'Environment ID', 'type': 'STR', 'description':\n 'Used by the AWS blueprints'}"}), "(name='env_id', defaults={'label':\n 'Environment ID', 'type': 'STR', 'description':\n 'Used by the AWS blueprints'})\n", (808, 929), False, 'from infrastructure.models import CustomField, Environment\n'), ((974, 1127), 'infrastructure.models.CustomField.objects.get_or_create', 'CustomField.objects.get_or_create', ([], {'name': '"""name"""', 'defaults': "{'label': 'AWS ElastiCache', 'type': 'STR', 'description':\n 'Used by the AWS blueprints'}"}), "(name='name', defaults={'label':\n 'AWS ElastiCache', 'type': 'STR', 'description':\n 'Used by the AWS blueprints'})\n", (1007, 1127), False, 'from infrastructure.models import CustomField, Environment\n'), ((1172, 1297), 'infrastructure.models.CustomField.objects.get_or_create', 'CustomField.objects.get_or_create', ([], {'name': '"""cluster_name"""', 'defaults': "{'label': 'AWS ElastiCache cluster_name', 'type': 'STR'}"}), "(name='cluster_name', defaults={'label':\n 'AWS ElastiCache cluster_name', 'type': 'STR'})\n", (1205, 1297), False, 'from infrastructure.models import CustomField, Environment\n'), ((1334, 1526), 'infrastructure.models.CustomField.objects.get_or_create', 'CustomField.objects.get_or_create', ([], {'name': '"""engine"""', 'defaults': "{'label': 'AWS ElastiCache engine', 'type': 'STR', 'description':\n 'The name of the cache engine to be used for this cluster'}"}), "(name='engine', defaults={'label':\n 'AWS ElastiCache engine', 'type': 'STR', 'description':\n 'The name of the cache engine to be used for this cluster'})\n", (1367, 1526), False, 'from infrastructure.models import CustomField, Environment\n'), ((1665, 1705), 'infrastructure.models.Environment.objects_for_profile', 'Environment.objects_for_profile', (['profile'], {}), '(profile)\n', (1696, 1705), False, 'from infrastructure.models import CustomField, Environment\n'), ((1725, 1749), 'resourcehandlers.aws.models.AWSHandler.objects.all', 'AWSHandler.objects.all', ([], {}), '()\n', (1747, 1749), False, 'from resourcehandlers.aws.models import AWSHandler\n'), ((4099, 4150), 'infrastructure.models.Environment.objects.get', 'Environment.objects.get', ([], {'id': '"""{{ aws_environment }}"""'}), "(id='{{ aws_environment }}')\n", (4122, 4150), False, 'from infrastructure.models import CustomField, Environment\n'), ((367, 504), 'boto3.client', 'boto3.client', (['"""elasticache"""'], {'region_name': 'env.aws_region', 'aws_access_key_id': 'rh.serviceaccount', 'aws_secret_access_key': 'rh.servicepasswd'}), "('elasticache', region_name=env.aws_region, aws_access_key_id=\n rh.serviceaccount, aws_secret_access_key=rh.servicepasswd)\n", (379, 504), False, 'import boto3\n'), ((5143, 5196), 'common.methods.set_progress', 'set_progress', (["('Status of the cluster is: %s' % status)"], {}), "('Status of the cluster is: %s' % status)\n", (5155, 5196), False, 'from common.methods import set_progress\n'), 
((5319, 5332), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5329, 5332), False, 'import time\n'), ((5244, 5292), 'common.methods.set_progress', 'set_progress', (['"""Cluster is now available on host"""'], {}), "('Cluster is now available on host')\n", (5256, 5292), False, 'from common.methods import set_progress\n')]
|
# -*- coding: utf-8 -*-
"""
Wrapper for PyICU word segmentation. This wrapper module uses
:class:`icu.BreakIterator` with a Thai :class:`icu.Locale`
to locate boundaries between words from the text.
:See Also:
* `GitHub repository <https://github.com/ovalhub/pyicu>`_
"""
import re
from typing import Iterator, List
from icu import BreakIterator, Locale
def _gen_words(text: str) -> Iterator[str]:
bd = BreakIterator.createWordInstance(Locale("th"))
bd.setText(text)
p = bd.first()
for q in bd:
yield text[p:q]
p = q
def segment(text: str) -> List[str]:
if not text or not isinstance(text, str):
return []
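    # Pad runs of non-Thai characters with spaces so they are emitted as separate tokens.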
text = re.sub("([^\u0E00-\u0E7F\n ]+)", " \\1 ", text)
return list(_gen_words(text))
|
[
"icu.Locale",
"re.sub"
] |
[((652, 692), 're.sub', 're.sub', (['"""([^\u0e00-\u0e7f\n ]+)"""', '""" \\\\1 """', 'text'], {}), '("""([^\u0e00-\u0e7f\n ]+)""", \' \\\\1 \', text)\n', (658, 692), False, 'import re\n'), ((428, 440), 'icu.Locale', 'Locale', (['"""th"""'], {}), "('th')\n", (434, 440), False, 'from icu import BreakIterator, Locale\n')]
|
"""
Jupyter <-> Vim
See: <http://jupyter-client.readthedocs.io/en/stable/api/client.html>
"""
# Standard
import re
from textwrap import dedent
from threading import Thread, Lock
from time import sleep
# Py module
from jupyter_client import KernelManager
import vim
# Local
from jupyter_util import echom, unquote_string, match_kernel_id, get_vim
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
# Local
from language import list_languages
class VimMessenger():
"""Handle message to/from Vim
Attributes
----------
sync : :obj:`Sync`
Object to support asynchronous operations.
message_queue : :obj:`Queue`
Asynchronous queue of messages.
pid : int
PID of the current vim session.
verbose : bool
If True, receive message id from sending function and report back to
vim with output, silent otherwise.
monitor_console : bool
If True, create a new buffer in vim to display output from the kernel.
cell_separators : list of str
User-defined list of strings that separate code cells.
"""
def __init__(self, sync):
self.sync = sync
self.message_queue = Queue() # for async echom
self.pid = get_vim('getpid()', -1) # pid of current vim session
# Define members python <- vim
self.set_monitor_bools()
self.set_cell_separators()
def set_monitor_bools(self):
"""Set booleans to define if jupyter_vim monitors messages."""
# NOTE this function is called by the @monitor_decorator in jupyter_vim
# to ensure user options are up-to-date.
self.verbose = bool(int(vim.vars.get('jupyter_verbose', 0)))
self.monitor_console = bool(int(vim.vars.get('jupyter_monitor_console', 0)))
def set_cell_separators(self):
"""Set cell separators (list of str)."""
# NOTE this function is called from jupyter_vim.run_cell
self.cell_separators = get_vim('g:jupyter_cell_separators', '')
self.cell_separators = [unquote_string(x) for x in self.cell_separators]
@staticmethod
def get_timer_intervals():
"""Return list of user-defined timers [ms].
Returns
-------
list of int
List of timers [ms].
"""
timer_list = get_vim('g:jupyter_timer_intervals', [0.1, 0.5, 1, 3])
return [int(x) for x in timer_list]
@staticmethod
def get_meta_messages():
"""Return list of user-defined list of meta messages.
Returns
-------
list of str
List of user-defined meta messages to send before/after code.
"""
return (get_vim('b:jupyter_exec_before', ''),
get_vim('b:jupyter_exec_pre', ''),
get_vim('b:jupyter_exex_post', ''),
get_vim('b:jupyter_exec_after', '')
)
def is_cell_separator(self, line):
"""Return True if given `line` is a cell separator."""
return any([bool(re.match(separation, line.strip()))
for separation in self.cell_separators])
def thread_echom(self, arg, **args):
"""Wrap echo async: put message to be echoed in a queue."""
self.message_queue.put((arg, args))
def timer_echom(self):
"""Call echom sync on all messages in queue."""
# Check in
if self.message_queue.empty():
return
# Show user the force
while not self.message_queue.empty():
(arg, args) = self.message_queue.get_nowait()
echom(arg, **args)
# Restore peace in the galaxy
vim.command('redraw')
# TODO add verbose flag
def string_hi(self):
"""Return Hi from vim string."""
return ('\\n\\nReceived connection from vim client with pid {}'
'\\n' + '-' * 60 + '\\n').format(self.pid)
def thread_echom_kernel_info(self, kernel_info):
"""Echo kernel info (async).
Parameters
----------
kernel_info : str
Information about the kernel to print in vim messages.
"""
kernel_string = '\n '.join([str(key) + ': ' + str(kernel_info[key])
for key in kernel_info])
# Send command so that user knows vim is connected at bottom, more readable
self.thread_echom('Connected: {}'.format(kernel_info['id']), style='Question')
# FIXME messages does not actually display in vim,
# only appears in `:messages` command.
self.thread_echom('To:', style='Question')
self.thread_echom(kernel_string)
class JupyterMessenger():
"""Handle primitive messages to/from jupyter kernel.
Attributes
----------
km_client : :obj:`KernelManager` client
Object to handle connections with the kernel.
See: <http://jupyter-client.readthedocs.io/en/stable/api/client.html>
kernel_info : dict
Information about the kernel itself.
dict with keys:
'kernel_type' : str, the type of kernel, i.e. `python`.
'pid' : int, the pid of the kernel process.
'cwd' : str, the current working directory of the kernel.
'hostname' : str, the hostname of the kernel.
cfile : str
Filename of the connection file, i.e. `kernel-123.json`.
sync : :obj:`Sync`
Object to support asynchronous operations.
meta_messages : list of str
User-defined meta messages to send before/after code.
"""
def __init__(self, sync):
self.km_client = None # KernelManager client
self.kernel_info = dict() # Kernel information
self.cfile = '' # Connection file
self.sync = sync # Sync object
self.meta_messages = VimMessenger.get_meta_messages()
def create_kernel_manager(self):
"""Create the kernel manager and connect a client.
Returns
-------
bool
True if client connects successfully, False on failure.
"""
# Get client
kernel_manager = KernelManager(connection_file=self.cfile)
# The json may be badly encoding especially if autoconnecting
try:
kernel_manager.load_connection_file()
except Exception:
return False
self.km_client = kernel_manager.client()
# Open channel
self.km_client.start_channels()
# Ping the kernel
self.km_client.kernel_info()
try:
self.km_client.get_shell_msg(timeout=1)
return True
except Empty:
return False
def disconnnect(self):
"""Disconnect silently from kernel and close channels."""
if self.km_client is None:
return
self.km_client.stop_channels()
self.km_client = None
def update_meta_messages(self):
"""Sync: reread vim meta vars."""
self.meta_messages = VimMessenger.get_meta_messages()
def check_connection(self):
"""Check that we have a client connected to the kernel.
Returns
-------
bool
True if client is connected, False if not.
"""
return self.km_client.hb_channel.is_beating() if self.km_client else False
def check_connection_or_warn(self):
"""Echo warning if not connected.
Returns
-------
bool
True if client is connected, False if not.
"""
if self.check_connection():
return True
echom('WARNING: Not connected to Jupyter!'
'\nRun :JupyterConnect to find the kernel', style='WarningMsg')
return False
def get_pending_msgs(self):
"""Get pending message pool.
Returns
-------
list of :obj:`msg`
List of messages waiting on the `iopub_channel`.
"""
msgs = list()
try:
self.sync.msg_lock.acquire()
msgs = self.km_client.iopub_channel.get_msgs()
except (Empty, TypeError, KeyError, IndexError, ValueError):
pass
finally:
self.sync.msg_lock.release()
return msgs
def get_reply_msg(self, msg_id):
"""Get kernel reply from sent client message with msg_id (async).
This function can block 3 sec, so call in a thread.
Returns
-------
dict
Message response.
"""
# TODO handle 'is_complete' requests?
# <http://jupyter-client.readthedocs.io/en/stable/messaging.html#code-completeness>
# Declare default
reply = dict()
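        # Poll the shell channel up to 3 times (1 s timeout each) until the reply matching msg_id arrives.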
for _ in range(3):
# Check
if self.sync.stop:
return dict()
# Get
self.sync.msg_lock.acquire()
try:
reply = self.km_client.get_shell_msg(block=True, timeout=1) or {}
except (Empty, TypeError, KeyError, IndexError, ValueError):
pass
finally:
self.sync.msg_lock.release()
# Stop
if reply.get('parent_header', {}).get('msg_id', -1) == msg_id:
break
return reply
def send(self, msg, ismeta=False, **kwargs):
"""Send a message to the kernel client.
.. note:: Async: crossroad <- run_command
Global: -> cmd, cmd_id
Returns
-------
int
Command id.
"""
if not self.check_connection_or_warn():
return -1
# Pre
if not ismeta:
bef, pre, post, aft = self.meta_messages
# Send before unless it is blank
if bef:
self.send(bef, ismeta=True)
# Craft new message
msg = pre + msg + post
# Include dedent of msg so we don't get odd indentation errors.
cmd = dedent(msg)
# Actually send execute_request
cmd_id = self.km_client.execute(cmd, **kwargs)
# Send after unless it is blank
if not ismeta and aft:
self.send(aft, ismeta=True)
return cmd_id
def get_kernel_info(self, language):
"""Explicitly ask the jupyter kernel for its pid
.. note:: Thread: <- cfile
<- vim_pid
-> lang
-> kernel_pid
Returns
-------
dict
dict with keys: {'kernel_type', 'pid', 'cwd', 'hostname'}
"""
# Check in
if self.kernel_info['kernel_type'] not in list_languages():
            echom('I don\'t know how to get info for a Jupyter kernel of type "{}"'
.format(self.kernel_info['kernel_type']), 'WarningMsg')
# Fill kernel_info
self.kernel_info.update({
'connection_file': self.cfile,
'id': match_kernel_id(self.cfile), # int id of cfile
# Get from kernel info
'pid': self.send_code_and_get_reply(language.pid), # PID of kernel
'cwd': self.send_code_and_get_reply(language.cwd),
'hostname': self.send_code_and_get_reply(language.hostname),
})
# Return
return self.kernel_info
def send_code_and_get_reply(self, code):
"""Get variable _res from code string.
.. note:: Only used by get_kernel_info (internal) => send with ismeta.
Returns
-------
str
Unquoted string of the message reply.
"""
# Send message
msg_id = self.send(code, ismeta=True, silent=True, user_expressions={'_res': '_res'})
# Wait to get message back from kernel (1 sec)
reply = self.get_reply_msg(msg_id)
# Get _res from user expression
res = reply.get('content', {}).get('user_expressions', {}) \
.get('_res', {}).get('data', {}).get('text/plain', -1)
# Try again parse messages
if res == -1:
line_number = reply.get('content', {}).get('execution_count', -1)
msgs = self.get_pending_msgs()
res = parse_iopub_for_reply(msgs, line_number)
# Rest in peace
return unquote_string(res)
class Sync():
"""Synchronization (not so) primitives, for safe thread support.
Attributes
----------
thread : :obj:`Thread` or None
The running thread.
stop : bool
True if thread should not be stopped, False otherwise.
line_queue : :obj:`Queue`
Queue of lines of code to echo to the kernel.
msg_lock : :obj:`Lock`
lock to retrieve messages one thread at a time.
"""
def __init__(self):
self.thread = None
self.stop = False
self.line_queue = Queue()
self.msg_lock = Lock()
def check_stop(self):
"""Check and reset stop value.
Returns
-------
bool
Last value of `self.stop`.
"""
last = self.stop
if self.stop:
self.stop = False
return last
def stop_thread(self):
"""Stop current thread."""
if self.thread is None:
return
if not self.thread.is_alive():
self.thread = None
return
# Wait 1 sec max
self.stop = True
for _ in range(100):
if not self.stop:
sleep(0.010)
self.thread = None
return
def start_thread(self, target=None, args=None):
"""Stop last / Create new / Start thread.
Parameters
----------
target : callable, optional, default=None
Callable object to which `args` will be passed.
args : list, optional, default=None
"""
if args is None:
args = list()
self.stop_thread()
self.thread = Thread(target=target, args=args, daemon=True)
self.thread.start()
# -----------------------------------------------------------------------------
# Parsers
# -----------------------------------------------------------------------------
def parse_iopub_for_reply(msgs, line_number):
"""Get kernel response from message pool (Async).
    .. note:: some kernels (iperl) do not discriminate when the client asks for
`user_expressions`. But still they give a printable output.
Parameters
----------
msgs : list
List of messages to parse.
line_number : int
The message number of the corresponding code.
Returns
-------
str
The kernel response to the messages.
"""
res = -1
# Parse all execute
for msg in msgs:
# Get the result of execution
content = msg.get('content', False)
if not content:
continue
ec = int(content.get('execution_count', 0))
if not ec:
continue
if line_number not in (-1, ec):
continue
msg_type = msg.get('header', {}).get('msg_type', '')
if msg_type not in ('execute_result', 'stream'):
continue
res = content.get('data', {}).get('text/plain', -1)
res = res if res != -1 else content.get('text', -1)
break
return res
|
[
"textwrap.dedent",
"threading.Thread",
"jupyter_util.get_vim",
"jupyter_util.unquote_string",
"jupyter_util.echom",
"Queue.Queue",
"vim.vars.get",
"jupyter_util.match_kernel_id",
"time.sleep",
"threading.Lock",
"jupyter_client.KernelManager",
"vim.command",
"language.list_languages"
] |
[((1215, 1222), 'Queue.Queue', 'Queue', ([], {}), '()\n', (1220, 1222), False, 'from Queue import Queue, Empty\n'), ((1267, 1290), 'jupyter_util.get_vim', 'get_vim', (['"""getpid()"""', '(-1)'], {}), "('getpid()', -1)\n", (1274, 1290), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((1998, 2038), 'jupyter_util.get_vim', 'get_vim', (['"""g:jupyter_cell_separators"""', '""""""'], {}), "('g:jupyter_cell_separators', '')\n", (2005, 2038), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((2341, 2395), 'jupyter_util.get_vim', 'get_vim', (['"""g:jupyter_timer_intervals"""', '[0.1, 0.5, 1, 3]'], {}), "('g:jupyter_timer_intervals', [0.1, 0.5, 1, 3])\n", (2348, 2395), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((3669, 3690), 'vim.command', 'vim.command', (['"""redraw"""'], {}), "('redraw')\n", (3680, 3690), False, 'import vim\n'), ((6137, 6178), 'jupyter_client.KernelManager', 'KernelManager', ([], {'connection_file': 'self.cfile'}), '(connection_file=self.cfile)\n', (6150, 6178), False, 'from jupyter_client import KernelManager\n'), ((7590, 7706), 'jupyter_util.echom', 'echom', (['"""WARNING: Not connected to Jupyter!\nRun :JupyterConnect to find the kernel"""'], {'style': '"""WarningMsg"""'}), '(\n """WARNING: Not connected to Jupyter!\nRun :JupyterConnect to find the kernel"""\n , style=\'WarningMsg\')\n', (7595, 7706), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((9943, 9954), 'textwrap.dedent', 'dedent', (['msg'], {}), '(msg)\n', (9949, 9954), False, 'from textwrap import dedent\n'), ((12250, 12269), 'jupyter_util.unquote_string', 'unquote_string', (['res'], {}), '(res)\n', (12264, 12269), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((12806, 12813), 'Queue.Queue', 'Queue', ([], {}), '()\n', (12811, 12813), False, 'from Queue import Queue, Empty\n'), ((12838, 12844), 'threading.Lock', 'Lock', ([], {}), '()\n', (12842, 12844), False, 'from threading import Thread, Lock\n'), ((13897, 13942), 'threading.Thread', 'Thread', ([], {'target': 'target', 'args': 'args', 'daemon': '(True)'}), '(target=target, args=args, daemon=True)\n', (13903, 13942), False, 'from threading import Thread, Lock\n'), ((2071, 2088), 'jupyter_util.unquote_string', 'unquote_string', (['x'], {}), '(x)\n', (2085, 2088), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((2705, 2741), 'jupyter_util.get_vim', 'get_vim', (['"""b:jupyter_exec_before"""', '""""""'], {}), "('b:jupyter_exec_before', '')\n", (2712, 2741), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((2759, 2792), 'jupyter_util.get_vim', 'get_vim', (['"""b:jupyter_exec_pre"""', '""""""'], {}), "('b:jupyter_exec_pre', '')\n", (2766, 2792), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((2810, 2844), 'jupyter_util.get_vim', 'get_vim', (['"""b:jupyter_exex_post"""', '""""""'], {}), "('b:jupyter_exex_post', '')\n", (2817, 2844), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((2862, 2897), 'jupyter_util.get_vim', 'get_vim', (['"""b:jupyter_exec_after"""', '""""""'], {}), "('b:jupyter_exec_after', '')\n", (2869, 2897), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((3603, 3621), 'jupyter_util.echom', 'echom', (['arg'], {}), '(arg, **args)\n', (3608, 3621), False, 'from jupyter_util 
import echom, unquote_string, match_kernel_id, get_vim\n'), ((10628, 10644), 'language.list_languages', 'list_languages', ([], {}), '()\n', (10642, 10644), False, 'from language import list_languages\n'), ((1695, 1729), 'vim.vars.get', 'vim.vars.get', (['"""jupyter_verbose"""', '(0)'], {}), "('jupyter_verbose', 0)\n", (1707, 1729), False, 'import vim\n'), ((1772, 1814), 'vim.vars.get', 'vim.vars.get', (['"""jupyter_monitor_console"""', '(0)'], {}), "('jupyter_monitor_console', 0)\n", (1784, 1814), False, 'import vim\n'), ((10928, 10955), 'jupyter_util.match_kernel_id', 'match_kernel_id', (['self.cfile'], {}), '(self.cfile)\n', (10943, 10955), False, 'from jupyter_util import echom, unquote_string, match_kernel_id, get_vim\n'), ((13434, 13445), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (13439, 13445), False, 'from time import sleep\n')]
|
# Install the Python library from https://pypi.org/project/amadeus
from amadeus import ResponseError, Client
amadeus = Client(
client_id='YOUR_AMADEUS_API_KEY',
client_secret='YOUR_AMADEUS_API_SECRET'
)
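# Replace the placeholders above with credentials from your Amadeus for Developers account.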
try:
'''
    # Delete a given flight order based on its id
'''
response = amadeus.booking.flight_order('MlpZVkFMfFdBVFNPTnwyMDE1LTExLTAy').delete()
print(response.data)
except ResponseError as error:
raise error
|
[
"amadeus.Client"
] |
[((120, 206), 'amadeus.Client', 'Client', ([], {'client_id': '"""YOUR_AMADEUS_API_KEY"""', 'client_secret': '"""YOUR_AMADEUS_API_SECRET"""'}), "(client_id='YOUR_AMADEUS_API_KEY', client_secret=\n 'YOUR_AMADEUS_API_SECRET')\n", (126, 206), False, 'from amadeus import ResponseError, Client\n')]
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# initial
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
# Model 'ObjectCategory'
self.db.create_table(
"cm_objectcategory",
(
("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
("name", models.CharField("Name", max_length=64, unique=True)),
(
"description",
models.CharField("Description", max_length=128, null=True, blank=True),
),
),
)
# Model 'Object'
self.db.create_table(
"cm_object",
(
("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
("handler_class_name", models.CharField("Object Type", max_length=64)),
("stream_url", models.CharField("URL", max_length=128)),
("profile_name", models.CharField("Profile", max_length=128)),
("repo_path", models.CharField("Repo Path", max_length=128)),
(
"push_every",
models.PositiveIntegerField(
"Push Every (secs)", default=86400, blank=True, null=True
),
),
("next_push", models.DateTimeField("Next Push", blank=True, null=True)),
("last_push", models.DateTimeField("Last Push", blank=True, null=True)),
(
"pull_every",
models.PositiveIntegerField(
"Pull Every (secs)", default=86400, blank=True, null=True
),
),
("next_pull", models.DateTimeField("Next Pull", blank=True, null=True)),
("last_pull", models.DateTimeField("Last Pull", blank=True, null=True)),
),
)
# Mock Models
Object = self.db.mock_model(model_name="Object", db_table="cm_object")
ObjectCategory = self.db.mock_model(
model_name="ObjectCategory", db_table="cm_objectcategory"
)
# M2M field 'Object.categories'
self.db.create_table(
"cm_object_categories",
(
("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
("object", models.ForeignKey(Object, null=False, on_delete=models.CASCADE)),
(
"objectcategory",
models.ForeignKey(ObjectCategory, null=False, on_delete=models.CASCADE),
),
),
)
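        # The composite unique index guarantees one Object row per (handler type, repo path) pair.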
self.db.create_index("cm_object", ["handler_class_name", "repo_path"], unique=True)
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((629, 701), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'primary_key': '(True)', 'auto_created': '(True)'}), "(verbose_name='ID', primary_key=True, auto_created=True)\n", (645, 701), False, 'from django.db import models\n'), ((729, 781), 'django.db.models.CharField', 'models.CharField', (['"""Name"""'], {'max_length': '(64)', 'unique': '(True)'}), "('Name', max_length=64, unique=True)\n", (745, 781), False, 'from django.db import models\n'), ((857, 927), 'django.db.models.CharField', 'models.CharField', (['"""Description"""'], {'max_length': '(128)', 'null': '(True)', 'blank': '(True)'}), "('Description', max_length=128, null=True, blank=True)\n", (873, 927), False, 'from django.db import models\n'), ((1090, 1162), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'primary_key': '(True)', 'auto_created': '(True)'}), "(verbose_name='ID', primary_key=True, auto_created=True)\n", (1106, 1162), False, 'from django.db import models\n'), ((1204, 1250), 'django.db.models.CharField', 'models.CharField', (['"""Object Type"""'], {'max_length': '(64)'}), "('Object Type', max_length=64)\n", (1220, 1250), False, 'from django.db import models\n'), ((1284, 1323), 'django.db.models.CharField', 'models.CharField', (['"""URL"""'], {'max_length': '(128)'}), "('URL', max_length=128)\n", (1300, 1323), False, 'from django.db import models\n'), ((1359, 1402), 'django.db.models.CharField', 'models.CharField', (['"""Profile"""'], {'max_length': '(128)'}), "('Profile', max_length=128)\n", (1375, 1402), False, 'from django.db import models\n'), ((1435, 1480), 'django.db.models.CharField', 'models.CharField', (['"""Repo Path"""'], {'max_length': '(128)'}), "('Repo Path', max_length=128)\n", (1451, 1480), False, 'from django.db import models\n'), ((1555, 1645), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (['"""Push Every (secs)"""'], {'default': '(86400)', 'blank': '(True)', 'null': '(True)'}), "('Push Every (secs)', default=86400, blank=True,\n null=True)\n", (1582, 1645), False, 'from django.db import models\n'), ((1738, 1794), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""Next Push"""'], {'blank': '(True)', 'null': '(True)'}), "('Next Push', blank=True, null=True)\n", (1758, 1794), False, 'from django.db import models\n'), ((1827, 1883), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""Last Push"""'], {'blank': '(True)', 'null': '(True)'}), "('Last Push', blank=True, null=True)\n", (1847, 1883), False, 'from django.db import models\n'), ((1958, 2048), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (['"""Pull Every (secs)"""'], {'default': '(86400)', 'blank': '(True)', 'null': '(True)'}), "('Pull Every (secs)', default=86400, blank=True,\n null=True)\n", (1985, 2048), False, 'from django.db import models\n'), ((2141, 2197), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""Next Pull"""'], {'blank': '(True)', 'null': '(True)'}), "('Next Pull', blank=True, null=True)\n", (2161, 2197), False, 'from django.db import models\n'), ((2230, 2286), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""Last Pull"""'], {'blank': '(True)', 'null': '(True)'}), "('Last Pull', blank=True, null=True)\n", (2250, 2286), False, 'from django.db import models\n'), ((2684, 2756), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'primary_key': '(True)', 'auto_created': '(True)'}), "(verbose_name='ID', primary_key=True, 
auto_created=True)\n", (2700, 2756), False, 'from django.db import models\n'), ((2786, 2849), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Object'], {'null': '(False)', 'on_delete': 'models.CASCADE'}), '(Object, null=False, on_delete=models.CASCADE)\n', (2803, 2849), False, 'from django.db import models\n'), ((2928, 2999), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ObjectCategory'], {'null': '(False)', 'on_delete': 'models.CASCADE'}), '(ObjectCategory, null=False, on_delete=models.CASCADE)\n', (2945, 2999), False, 'from django.db import models\n')]
|
from robinhood_crypto_api import RobinhoodCrypto
from bin import pricinginfo
from indicators import ST
import pprint
import time
from multiprocessing import Process
r = RobinhoodCrypto("INSERT GMAIL HERE","INSERT USERNAME HERE")
#Make sure two factor authentication is being sent to your phone
def holdingsinfo(instrument):
holdings_info = r.holdings()
pprint.pprint(r.accounts())
if holdings_info:
for i in holdings_info:
for values in i['currency'].values():
if values == instrument:
quantity = float(i['quantity'])
return quantity
def main(instrument, granularity, multiplier, length, cash):
listoftime = []
if instrument == "BTCUSDT":
name = "Bitcoin"
rinstru = "BTCUSD"
if instrument == "ETHUSDT":
name = "Ethereum"
rinstru = "ETHUSD"
print(name)
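    # Main loop: poll for a new candle every half second and trade when the ST indicator signal flips.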
while True:
try:
time.sleep(0.5)
times = pricinginfo(instrument = instrument, interval = granularity, number = 5, dfs=0)
if times not in listoftime:
listoftime.append(times)
if len(listoftime) > 1:
before, after = ST(instrument = instrument, interval = granularity, number = 100, multiplier=multiplier, length=length)
quote_info = (r.quotes(rinstru))['mark_price']
quantity = holdingsinfo(name)
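                    # With no open position, a SELL->BUY flip triggers a market buy sized by the configured cash amount;
                    # otherwise a BUY->SELL flip sells the entire holding.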
if quantity == 0:
if before == 'SELL' and after == 'BUY':
quantities = round(cash/float(quote_info), 5)
market_order_info = r.trade(
rinstru,
price=str(round(float(quote_info) * 1.005, 2)),
quantity=str(quantities),
side="buy",
time_in_force="gtc",
type="market"
)
print('Buy initiated for {}'.format(instrument))
else:
if before == 'BUY' and after == 'SELL':
market_order_info = r.trade(
rinstru,
price=str(round(float(quote_info) * 0.995, 2)),
quantity=str(quantity),
side="sell",
time_in_force="gtc",
type="market"
)
print('Sell initiated for {}'.format(instrument))
except Exception as e:
print(e)
print('Restarting!')
time.sleep(30)
continue
if __name__ == '__main__':
Process(target=main, args=("BTCUSDT", '1HOUR', 2.9, 13, 34.5)).start()
time.sleep(10)
Process(target=main, args=('ETHUSDT', '1HOUR', 2.5, 15, 64.5)).start()
# process1 = threading.Thread(target = main, args=("BTCUSDT", '1MINUTE', 3, 10, "0.00225"))
# process2 = threading.Thread(target = main, args=('ETHUSDT', '1MINUTE', 2.5, 15, "0.07"))
# process1.start()
# process2.start()
#ST(instrument = "BTCUSDT", number = 100, interval = "1HOUR", multiplier=3, length=10)
# quote_info = r.quotes()
# market_order_info = r.trade(
# 'BTCUSD',
# price="5000",
# quantity="0.000015",
# side="buy",
# time_in_force="gtc",
# type="limit"
# )
|
[
"indicators.ST",
"time.sleep",
"bin.pricinginfo",
"multiprocessing.Process",
"robinhood_crypto_api.RobinhoodCrypto"
] |
[((175, 235), 'robinhood_crypto_api.RobinhoodCrypto', 'RobinhoodCrypto', (['"""INSERT GMAIL HERE"""', '"""INSERT USERNAME HERE"""'], {}), "('INSERT GMAIL HERE', 'INSERT USERNAME HERE')\n", (190, 235), False, 'from robinhood_crypto_api import RobinhoodCrypto\n'), ((3014, 3028), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3024, 3028), False, 'import time\n'), ((943, 958), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (953, 958), False, 'import time\n'), ((980, 1053), 'bin.pricinginfo', 'pricinginfo', ([], {'instrument': 'instrument', 'interval': 'granularity', 'number': '(5)', 'dfs': '(0)'}), '(instrument=instrument, interval=granularity, number=5, dfs=0)\n', (991, 1053), False, 'from bin import pricinginfo\n'), ((2938, 3000), 'multiprocessing.Process', 'Process', ([], {'target': 'main', 'args': "('BTCUSDT', '1HOUR', 2.9, 13, 34.5)"}), "(target=main, args=('BTCUSDT', '1HOUR', 2.9, 13, 34.5))\n", (2945, 3000), False, 'from multiprocessing import Process\n'), ((3034, 3096), 'multiprocessing.Process', 'Process', ([], {'target': 'main', 'args': "('ETHUSDT', '1HOUR', 2.5, 15, 64.5)"}), "(target=main, args=('ETHUSDT', '1HOUR', 2.5, 15, 64.5))\n", (3041, 3096), False, 'from multiprocessing import Process\n'), ((2866, 2880), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2876, 2880), False, 'import time\n'), ((1221, 1323), 'indicators.ST', 'ST', ([], {'instrument': 'instrument', 'interval': 'granularity', 'number': '(100)', 'multiplier': 'multiplier', 'length': 'length'}), '(instrument=instrument, interval=granularity, number=100, multiplier=\n multiplier, length=length)\n', (1223, 1323), False, 'from indicators import ST\n')]
|
# flake8: noqa
import codecs
import time
import unittest
try:
import unittest.mock as compat_mock
except ImportError:
import mock as compat_mock
import sys
import os
try:
from instagram_private_api import (
__version__, Client, ClientError, ClientLoginError,
ClientCookieExpiredError, ClientThrottledError, ClientCompatPatch,
ClientLoginRequiredError, MediaTypes,
ClientSentryBlockError, ClientCheckpointRequiredError,
ClientChallengeRequiredError)
from instagram_private_api.utils import (
InstagramID, gen_user_breadcrumb,
max_chunk_size_generator, max_chunk_count_generator, get_file_size,
ig_chunk_generator
) # noqa
from instagram_private_api.constants import Constants
from instagram_private_api.compat import compat_urllib_parse
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from instagram_private_api import (
__version__, Client, ClientError, ClientLoginError,
ClientCookieExpiredError, ClientThrottledError, ClientCompatPatch,
ClientLoginRequiredError, MediaTypes,
ClientSentryBlockError, ClientCheckpointRequiredError,
ClientChallengeRequiredError)
from instagram_private_api.utils import (
InstagramID, gen_user_breadcrumb,
max_chunk_size_generator, max_chunk_count_generator, get_file_size,
ig_chunk_generator
) # noqa
from instagram_private_api.constants import Constants
from instagram_private_api.compat import compat_urllib_parse
try:
from instagram_web_api import (
__version__ as __webversion__,
Client as WebClient,
ClientError as WebClientError,
ClientLoginError as WebClientLoginError,
ClientCookieExpiredError as WebClientCookieExpiredError,
ClientCompatPatch as WebClientCompatPatch)
from instagram_web_api.compat import compat_urllib_error
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from instagram_web_api import (
__version__ as __webversion__,
Client as WebClient,
ClientError as WebClientError,
ClientLoginError as WebClientLoginError,
ClientCookieExpiredError as WebClientCookieExpiredError,
ClientCompatPatch as WebClientCompatPatch)
from instagram_web_api.compat import compat_urllib_error
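# JSON helpers: encode bytes as base64 on dump and decode them back on load so binary payloads survive serialization.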
def to_json(python_object):
if isinstance(python_object, bytes):
return {'__class__': 'bytes',
'__value__': codecs.encode(python_object, 'base64').decode()}
raise TypeError(repr(python_object) + ' is not JSON serializable')
def from_json(json_object):
if '__class__' in json_object and json_object['__class__'] == 'bytes':
return codecs.decode(json_object['__value__'].encode(), 'base64')
return json_object
class ApiTestBase(unittest.TestCase):
"""Main base class for private api tests."""
def __init__(self, testname, api, user_id=None, media_id=None):
super(ApiTestBase, self).__init__(testname)
self.api = api
self.test_user_id = user_id
self.test_media_id = media_id
        self.sleep_interval = 2.5  # sleep a bit between tests to avoid HTTP429 errors
        if testname.endswith('_mock'):
            self.sleep_interval = 0  # no need to throttle mocked tests
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
time.sleep(self.sleep_interval)
class WebApiTestBase(unittest.TestCase):
"""Main base class for web api tests."""
def __init__(self, testname, api):
super(WebApiTestBase, self).__init__(testname)
self.api = api
        self.sleep_interval = 2.5  # sleep a bit between tests to avoid HTTP429 errors
        if testname.endswith('_mock'):
            self.sleep_interval = 0  # no need to throttle mocked tests
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.test_user_id = '25025320'
self.test_user_name = 'instagram'
self.test_media_shortcode = 'BJL-gjsDyo1'
self.test_media_shortcode2 = 'BVRqQxmj2TA'
self.test_media_id = '1009392755603152985'
self.test_comment_id = '1234567890'
def tearDown(self):
time.sleep(self.sleep_interval)
class MockResponse(object):
"""A mock class to emulate api responses."""
def __init__(self, code=200, content_type='', body=''):
        self.code = code
self.content_type = content_type
self.body = body
def info(self):
return {'Content-Type': self.content_type}
def read(self):
return self.body.encode('utf8')
|
[
"codecs.encode",
"os.path.dirname",
"time.sleep"
] |
[((3511, 3542), 'time.sleep', 'time.sleep', (['self.sleep_interval'], {}), '(self.sleep_interval)\n', (3521, 3542), False, 'import time\n'), ((4358, 4389), 'time.sleep', 'time.sleep', (['self.sleep_interval'], {}), '(self.sleep_interval)\n', (4368, 4389), False, 'import time\n'), ((884, 909), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (899, 909), False, 'import os\n'), ((1996, 2021), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2011, 2021), False, 'import os\n'), ((2537, 2575), 'codecs.encode', 'codecs.encode', (['python_object', '"""base64"""'], {}), "(python_object, 'base64')\n", (2550, 2575), False, 'import codecs\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
def _get_or_create_eval_step():
"""Gets or creates the eval step `Tensor`.
Returns:
A `Tensor` representing a counter for the evaluation step.
Raises:
ValueError: If multiple `Tensors` have been added to the
`tf.GraphKeys.EVAL_STEP` collection.
"""
graph = ops.get_default_graph()
eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
if len(eval_steps) == 1:
return eval_steps[0]
elif len(eval_steps) > 1:
raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
else:
counter = variable_scope.get_variable(
'eval_step',
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
return counter
class _StopAfterNEvalsHook(session_run_hook.SessionRunHook):
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
def __init__(self, num_evals, log_progress=True):
"""Constructs the run hook.
Args:
num_evals: The number of evaluations to run for.
log_progress: Whether to log evaluation progress, defaults to True.
"""
# The number of evals to run for.
self._num_evals = num_evals
self._evals_completed = None
self._log_progress = log_progress
def _set_evals_completed_tensor(self, updated_eval_step):
self._evals_completed = updated_eval_step
def before_run(self, run_context):
return session_run_hook.SessionRunArgs({
'evals_completed': self._evals_completed
})
def after_run(self, run_context, run_values):
evals_completed = run_values.results['evals_completed']
if self._log_progress:
logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
if evals_completed >= self._num_evals:
run_context.request_stop()
def _evaluate_once(checkpoint_path,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
hooks=None,
config=None):
"""Evaluates the model at the given checkpoint path.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
  summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_path: The path to a checkpoint to use for evaluation.
master: The BNS address of the TensorFlow master.
    scaffold: A tf.train.Scaffold instance for initializing variables and
restoring variables. Note that `scaffold.init_fn` is used by the function
to restore the checkpoint. If you supply a custom init_fn, then it must
also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`, which is run until the session is requested to stop,
commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
evaluation loop.
config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = _get_or_create_eval_step()
# Prepare the run hooks.
hooks = hooks or []
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1)
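    # Bundle the eval-step increment with eval_ops and hand the updated counter to any _StopAfterNEvalsHook.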
for h in hooks:
if isinstance(h, _StopAfterNEvalsHook):
h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
logging.info('Starting evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
# Prepare the session creator.
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
final_ops_hook = basic_session_run_hooks.FinalOpsHook(
final_ops, final_ops_feed_dict)
hooks.append(final_ops_hook)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
return final_ops_hook.final_ops_values
|
[
"tensorflow.python.training.basic_session_run_hooks.FinalOpsHook",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.training.monitored_session.MonitoredSession",
"time.gmtime",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.training.session_run_hook.SessionRunArgs",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.training.monitored_session.ChiefSessionCreator"
] |
[((1628, 1651), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (1649, 1651), False, 'from tensorflow.python.framework import ops\n'), ((6717, 6859), 'tensorflow.python.training.monitored_session.ChiefSessionCreator', 'monitored_session.ChiefSessionCreator', ([], {'scaffold': 'scaffold', 'checkpoint_filename_with_path': 'checkpoint_path', 'master': 'master', 'config': 'config'}), '(scaffold=scaffold,\n checkpoint_filename_with_path=checkpoint_path, master=master, config=config\n )\n', (6754, 6859), False, 'from tensorflow.python.training import monitored_session\n'), ((6896, 6964), 'tensorflow.python.training.basic_session_run_hooks.FinalOpsHook', 'basic_session_run_hooks.FinalOpsHook', (['final_ops', 'final_ops_feed_dict'], {}), '(final_ops, final_ops_feed_dict)\n', (6932, 6964), False, 'from tensorflow.python.training import basic_session_run_hooks\n'), ((2829, 2904), 'tensorflow.python.training.session_run_hook.SessionRunArgs', 'session_run_hook.SessionRunArgs', (["{'evals_completed': self._evals_completed}"], {}), "({'evals_completed': self._evals_completed})\n", (2860, 2904), False, 'from tensorflow.python.training import session_run_hook\n'), ((6072, 6106), 'tensorflow.python.ops.state_ops.assign_add', 'state_ops.assign_add', (['eval_step', '(1)'], {}), '(eval_step, 1)\n', (6092, 6106), False, 'from tensorflow.python.ops import state_ops\n'), ((7011, 7096), 'tensorflow.python.training.monitored_session.MonitoredSession', 'monitored_session.MonitoredSession', ([], {'session_creator': 'session_creator', 'hooks': 'hooks'}), '(session_creator=session_creator, hooks=hooks\n )\n', (7045, 7096), False, 'from tensorflow.python.training import monitored_session\n'), ((3061, 3129), 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Evaluation [%d/%d]"""', 'evals_completed', 'self._num_evals'], {}), "('Evaluation [%d/%d]', evals_completed, self._num_evals)\n", (3073, 3129), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((6647, 6660), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (6658, 6660), False, 'import time\n'), ((7356, 7369), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (7367, 7369), False, 'import time\n'), ((2004, 2032), 'tensorflow.python.ops.init_ops.zeros_initializer', 'init_ops.zeros_initializer', ([], {}), '()\n', (2030, 2032), False, 'from tensorflow.python.ops import init_ops\n')]
|
from storage import read_region_snapshot, _round_15min
import datetime
from dateutil.parser import parse
def test_read_region_snapshot():
read_region_snapshot('slc_ut', '2021-09-01T00:00:00Z')
def test__round_15min():
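    # _round_15min should snap timestamps to the nearest 15-minute boundary.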
ts = parse('2021-01-31T23:59:01Z')
ret = _round_15min(ts)
assert ret == parse('2021-02-01T00:00:00Z')
ts = parse('2021-01-31T23:50:01Z')
ret = _round_15min(ts)
assert ret == parse('2021-01-31T23:45:00Z')
print('test__round_15min: All tests passed')
def run_tests():
# test__round_15min()
# test_read_region_snapshot()
print('All IO tests passed')
if __name__ == '__main__':
run_tests()
|
[
"storage._round_15min",
"storage.read_region_snapshot",
"dateutil.parser.parse"
] |
[((144, 198), 'storage.read_region_snapshot', 'read_region_snapshot', (['"""slc_ut"""', '"""2021-09-01T00:00:00Z"""'], {}), "('slc_ut', '2021-09-01T00:00:00Z')\n", (164, 198), False, 'from storage import read_region_snapshot, _round_15min\n'), ((235, 264), 'dateutil.parser.parse', 'parse', (['"""2021-01-31T23:59:01Z"""'], {}), "('2021-01-31T23:59:01Z')\n", (240, 264), False, 'from dateutil.parser import parse\n'), ((275, 291), 'storage._round_15min', '_round_15min', (['ts'], {}), '(ts)\n', (287, 291), False, 'from storage import read_region_snapshot, _round_15min\n'), ((350, 379), 'dateutil.parser.parse', 'parse', (['"""2021-01-31T23:50:01Z"""'], {}), "('2021-01-31T23:50:01Z')\n", (355, 379), False, 'from dateutil.parser import parse\n'), ((390, 406), 'storage._round_15min', '_round_15min', (['ts'], {}), '(ts)\n', (402, 406), False, 'from storage import read_region_snapshot, _round_15min\n'), ((310, 339), 'dateutil.parser.parse', 'parse', (['"""2021-02-01T00:00:00Z"""'], {}), "('2021-02-01T00:00:00Z')\n", (315, 339), False, 'from dateutil.parser import parse\n'), ((425, 454), 'dateutil.parser.parse', 'parse', (['"""2021-01-31T23:45:00Z"""'], {}), "('2021-01-31T23:45:00Z')\n", (430, 454), False, 'from dateutil.parser import parse\n')]
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Reddit dataset using tldr as summaries."""
import json
import os
import datasets
_CITATION = """
@inproceedings{volske-etal-2017-tl,
title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},
author = {<NAME> and <NAME> and <NAME> and <NAME>},
booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
month = {sep},
year = {2017},
address = {Copenhagen, Denmark},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W17-4508},
doi = {10.18653/v1/W17-4508},
pages = {59--63},
abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},
}
"""
_DESCRIPTION = """
This corpus contains preprocessed posts from the Reddit dataset.
The dataset consists of 3,848,330 posts with an average length of 270 words for content,
and 28 words for the summary.
Features includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.
Content is used as document and summary is used as summary.
"""
_URL = "https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1"
_DOCUMENT = "content"
_SUMMARY = "summary"
_ADDITIONAL_FEATURES = ["author", "body", "normalizedBody", "subreddit", "subreddit_id", "id"]
class Reddit(datasets.GeneratorBasedBuilder):
"""Reddit Dataset."""
VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{k: datasets.Value("string") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
),
supervised_keys=None,
homepage="https://github.com/webis-de/webis-tldr-17-corpus",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"path": os.path.join(dl_path, "corpus-webis-tldr-17.json")},
)
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path, "rb") as f:
for i, line in enumerate(f):
# possible keys are:
# author: string (nullable = true)
# body: string (nullable = true)
# normalizedBody: string (nullable = true)
# content: string (nullable = true)
# content_len: long (nullable = true)
# summary: string (nullable = true)
# summary_len: long (nullable = true)
# id: string (nullable = true)
# subreddit: string (nullable = true)
# subreddit_id: string (nullable = true)
# title: string (nullable = true)
d = json.loads(line)
if _SUMMARY in d and _DOCUMENT in d:
yield i, {k: d.get(k, "") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
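# Hypothetical usage sketch (the dataset id "reddit" is an assumption, as is the
# availability of the Zenodo download referenced in _URL; not part of this script):
# import datasets
# ds = datasets.load_dataset("reddit", split="train")
# print(ds[0]["summary"])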
|
[
"datasets.Value",
"datasets.Version",
"json.loads",
"os.path.join"
] |
[((2583, 2608), 'datasets.Version', 'datasets.Version', (['"""1.0.0"""'], {}), "('1.0.0')\n", (2599, 2608), False, 'import datasets\n'), ((4173, 4189), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4183, 4189), False, 'import json\n'), ((2766, 2790), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2780, 2790), False, 'import datasets\n'), ((3284, 3334), 'os.path.join', 'os.path.join', (['dl_path', '"""corpus-webis-tldr-17.json"""'], {}), "(dl_path, 'corpus-webis-tldr-17.json')\n", (3296, 3334), False, 'import os\n')]
|
import subprocess
import os
def check_dependency(name):
""""""
e = subprocess.call('which '+name, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if e!=0:
raise RuntimeError('External dependency \''+name+'\' not installed')
def refresh_gcs_token():
""""""
t = subprocess.check_output('gcloud auth application-default print-access-token', shell=True)
os.environ.putenv('GCS_OAUTH_TOKEN', t)
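# Minimal usage sketch (assumes the gcloud CLI is installed and application-default
# credentials are already configured):
# check_dependency('gcloud')
# refresh_gcs_token()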
|
[
"os.environ.putenv",
"subprocess.check_output",
"subprocess.call"
] |
[((77, 179), 'subprocess.call', 'subprocess.call', (["('which ' + name)"], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "('which ' + name, shell=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n", (92, 179), False, 'import subprocess\n'), ((310, 403), 'subprocess.check_output', 'subprocess.check_output', (['"""gcloud auth application-default print-access-token"""'], {'shell': '(True)'}), "('gcloud auth application-default print-access-token',\n shell=True)\n", (333, 403), False, 'import subprocess\n'), ((404, 443), 'os.environ.putenv', 'os.environ.putenv', (['"""GCS_OAUTH_TOKEN"""', 't'], {}), "('GCS_OAUTH_TOKEN', t)\n", (421, 443), False, 'import os\n')]
|
from django.shortcuts import render
from django.http import HttpResponse
from .forms import EmployeeApplicationForm
# Create your views here.
def index(request):
newForm = EmployeeApplicationForm
context = {
"form": newForm,
}
return render(request, "EmployeeAppApp/index.html", context)
def gotThatInfo(request):
print(request.POST)
context = {
"name": request.POST["name"],
"dateOfBirth": request.POST["dateOfBirth"],
"position": request.POST["position"],
"salary": request.POST["salary"],
}
return render(request, "EmployeeAppApp/gotThatInfo.html", context)
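# Hypothetical URLconf wiring for these views (module and route names are assumed,
# not part of this file):
# from django.urls import path
# from . import views
# urlpatterns = [
#     path("", views.index, name="index"),
#     path("gotThatInfo/", views.gotThatInfo, name="gotThatInfo"),
# ]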
|
[
"django.shortcuts.render"
] |
[((261, 314), 'django.shortcuts.render', 'render', (['request', '"""EmployeeAppApp/index.html"""', 'context'], {}), "(request, 'EmployeeAppApp/index.html', context)\n", (267, 314), False, 'from django.shortcuts import render\n'), ((580, 639), 'django.shortcuts.render', 'render', (['request', '"""EmployeeAppApp/gotThatInfo.html"""', 'context'], {}), "(request, 'EmployeeAppApp/gotThatInfo.html', context)\n", (586, 639), False, 'from django.shortcuts import render\n')]
|
# -*- coding: utf-8 -*-
"""
Module containing the implementation of Geofile operations using a sql statement.
"""
from concurrent import futures
from dataclasses import dataclass
from datetime import datetime
import logging
import logging.config
import math
import multiprocessing
from pathlib import Path
import shutil
from typing import Iterable, List, Optional
import pandas as pd
import geofileops as gfo
from geofileops import GeofileType, GeometryType, PrimitiveType
from geofileops import fileops
from geofileops.fileops import _append_to_nolock
from . import _io_util
from . import _ogr_util
from . import _sqlite_util
from . import _general_util
################################################################################
# Some init
################################################################################
logger = logging.getLogger(__name__)
################################################################################
# Operations on one layer
################################################################################
def buffer(
input_path: Path,
output_path: Path,
distance: float,
quadrantsegments: int = 5,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
if distance < 0:
        # For a double-sided buffer, a negative distance is only relevant
        # for polygon types, so only keep polygon results.
        # A negative buffer can create invalid geometries, so use
        # ST_CollectionExtract to keep only the polygons.
sql_template = f"""
SELECT ST_CollectionExtract(
ST_buffer({{geometrycolumn}}, {distance}, {quadrantsegments}),
3) AS geom
{{columns_to_select_str}}
FROM "{{input_layer}}" layer
WHERE 1=1
{{batch_filter}}"""
else:
sql_template = f"""
SELECT ST_Buffer({{geometrycolumn}}, {distance}, {quadrantsegments}) AS geom
{{columns_to_select_str}}
FROM "{{input_layer}}" layer
WHERE 1=1
{{batch_filter}}"""
# Buffer operation always results in polygons...
force_output_geometrytype = GeometryType.MULTIPOLYGON
return _single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_template,
operation_name="buffer",
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
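# Hypothetical usage sketch (file names are illustrative only): buffer all features
# of a polygon file by 10 map units, using 8 quadrant segments per corner.
# buffer(
#     input_path=Path("parcels.gpkg"),
#     output_path=Path("parcels_buffered.gpkg"),
#     distance=10,
#     quadrantsegments=8,
# )
# A negative distance is handled by the first branch above and only keeps polygon results.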
def convexhull(
input_path: Path,
output_path: Path,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Prepare sql template for this operation
sql_template = """
SELECT ST_ConvexHull({geometrycolumn}) AS geom
{columns_to_select_str}
FROM "{input_layer}" layer
WHERE 1=1
{batch_filter}"""
# Output geometry type same as input geometry type
input_layer_info = gfo.get_layerinfo(input_path, input_layer)
return _single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_template,
operation_name="convexhull",
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force_output_geometrytype=input_layer_info.geometrytype,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def delete_duplicate_geometries(
input_path: Path,
output_path: Path,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
force: bool = False,
):
    # The query as written doesn't give correct results when parallelized,
    # but parallelization isn't useful for this operation anyway.
sql_template = """
SELECT {geometrycolumn} AS geom
{columns_to_select_str}
FROM "{input_layer}" layer
WHERE layer.rowid IN (
SELECT MIN(layer_sub.rowid) AS rowid_to_keep
FROM "{input_layer}" layer_sub
GROUP BY layer_sub.{geometrycolumn}
)
"""
# Go!
input_layer_info = gfo.get_layerinfo(input_path, input_layer)
return _single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_template,
operation_name="delete_duplicate_geometries",
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force_output_geometrytype=input_layer_info.geometrytype,
filter_null_geoms=True,
nb_parallel=1,
force=force,
)
def isvalid(
input_path: Path,
output_path: Path,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
) -> bool:
# Prepare sql template for this operation
sql_template = """
SELECT ST_IsValidDetail({geometrycolumn}) AS geom
,ST_IsValid({geometrycolumn}) AS isvalid
,ST_IsValidReason({geometrycolumn}) AS isvalidreason
{columns_to_select_str}
FROM "{input_layer}" layer
WHERE ST_IsValid({geometrycolumn}) <> 1
{batch_filter}"""
_single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_template,
operation_name="isvalid",
input_layer=input_layer,
output_layer=output_layer,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
# If no invalid geoms are found, there won't be an output file
if not output_path.exists():
        # If the input is spatialite-based, check if all its data can be read
try:
input_geofiletype = GeofileType(input_path)
if input_geofiletype.is_spatialite_based:
_sqlite_util.test_data_integrity(path=input_path)
                logger.debug("test_data_integrity was successful")
except Exception:
logger.exception(
"No invalid geometries found, but some attributes could not be read"
)
return False
return True
else:
layerinfo = gfo.get_layerinfo(output_path)
logger.info(
f"Found {layerinfo.featurecount} invalid geometries in {output_path}"
)
return False
def makevalid(
input_path: Path,
output_path: Path,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
force_output_geometrytype: Optional[GeometryType] = None,
precision: Optional[float] = None,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
    # Specify force_output_geometrytype, because otherwise makevalid results in
    # column type 'GEOMETRY'/'UNKNOWN(ANY)'
layerinfo = gfo.get_layerinfo(input_path, input_layer)
if force_output_geometrytype is None:
force_output_geometrytype = layerinfo.geometrytype
# First compose the operation to be done on the geometries
# If the number of decimals of coordinates should be limited
if precision is not None:
operation = f"SnapToGrid({{geometrycolumn}}, {precision})"
else:
operation = "{geometrycolumn}"
# Prepare sql template for this operation
operation = f"ST_MakeValid({operation})"
# If we want a specific geometrytype as result, extract it
if force_output_geometrytype is not GeometryType.GEOMETRYCOLLECTION:
primitivetypeid = force_output_geometrytype.to_primitivetype.value
operation = f"ST_CollectionExtract({operation}, {primitivetypeid})"
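    # For illustration: with precision=0.001 and a (multi)polygon output type
    # (primitive type 3), the composed expression above becomes
    #   ST_CollectionExtract(ST_MakeValid(SnapToGrid({geometrycolumn}, 0.001)), 3)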
# Now we can prepare the entire statement
sql_template = f"""
SELECT {operation} AS geom
{{columns_to_select_str}}
FROM "{{input_layer}}" layer
WHERE 1=1
{{batch_filter}}"""
_single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_template,
operation_name="makevalid",
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
# filter_null_geoms=False,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
    # If the input is spatialite-based, check if all its data can be read
    input_geofiletype = GeofileType(input_path)
    if input_geofiletype.is_spatialite_based:
        _sqlite_util.test_data_integrity(path=input_path)
def select(
input_path: Path,
output_path: Path,
sql_stmt: str,
sql_dialect: str = "SQLITE",
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
force_output_geometrytype: Optional[GeometryType] = None,
nb_parallel: int = 1,
batchsize: int = -1,
force: bool = False,
):
    # Check if the output exists already here, to avoid too much logging being written
if output_path.exists():
if force is False:
logger.info(f"Stop select: output exists already {output_path}")
return
logger.debug(f" -> select to execute:\n{sql_stmt}")
# If no output geometrytype is specified, use the geometrytype of the input layer
if force_output_geometrytype is None:
force_output_geometrytype = gfo.get_layerinfo(
input_path, input_layer
).geometrytype
logger.info(
"No force_output_geometrytype specified, so defaults to input layer "
f"geometrytype: {force_output_geometrytype}"
)
# Go!
return _single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_stmt,
operation_name="select",
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
filter_null_geoms=False,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def simplify(
input_path: Path,
output_path: Path,
tolerance: float,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Prepare sql template for this operation
sql_template = f"""
SELECT ST_SimplifyPreserveTopology({{geometrycolumn}}, {tolerance}) AS geom
{{columns_to_select_str}}
FROM "{{input_layer}}" layer
WHERE 1=1
{{batch_filter}}"""
# Output geometry type same as input geometry type
input_layer_info = gfo.get_layerinfo(input_path, input_layer)
return _single_layer_vector_operation(
input_path=input_path,
output_path=output_path,
sql_template=sql_template,
operation_name="simplify",
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force_output_geometrytype=input_layer_info.geometrytype,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def _single_layer_vector_operation(
input_path: Path,
output_path: Path,
sql_template: str,
operation_name: str,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
force_output_geometrytype: Optional[GeometryType] = None,
filter_null_geoms: bool = True,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Init
start_time = datetime.now()
# Check input parameters...
if not input_path.exists():
raise Exception(
f"Error {operation_name}: input_path doesn't exist: {input_path}"
)
# Check/get layer names
if input_layer is None:
input_layer = gfo.get_only_layer(input_path)
if output_layer is None:
output_layer = gfo.get_default_layer(output_path)
# If output file exists already, either clean up or return...
if output_path.exists():
if force is False:
logger.info(f"Stop {operation_name}: output exists already {output_path}")
return
else:
gfo.remove(output_path)
# Get layer info of the input layer
input_layerinfo = gfo.get_layerinfo(input_path, input_layer)
# Calculate
tempdir = _io_util.create_tempdir(f"geofileops/{operation_name.replace(' ', '_')}")
try:
processing_params = _prepare_processing_params(
input1_path=input_path,
input1_layer=input_layer,
input1_layer_alias="layer",
tempdir=tempdir,
nb_parallel=nb_parallel,
batchsize=batchsize,
convert_to_spatialite_based=False,
)
# If None is returned, just stop.
if processing_params is None or processing_params.batches is None:
return
# If there are multiple batches, there needs to be a {batch_filter}
# placeholder in the sql template!
if len(processing_params.batches) > 1:
if "{batch_filter}" not in sql_template:
raise ValueError(
"Error: nb_batches > 1 but no {batch_filter} "
f"placeholder in sql_template\n{sql_template}"
)
# Format column string for use in select
formatted_column_strings = format_column_strings(
columns_specified=columns, columns_available=input_layerinfo.columns
)
# Prepare output filename
tmp_output_path = tempdir / output_path.name
nb_done = 0
# Processing in threads is 2x faster for small datasets (on Windows)
calculate_in_threads = True if input_layerinfo.featurecount <= 100 else False
with _general_util.PooledExecutorFactory(
threadpool=calculate_in_threads,
max_workers=processing_params.nb_parallel,
initializer=_general_util.initialize_worker(),
) as calculate_pool:
batches = {}
future_to_batch_id = {}
for batch_id in processing_params.batches:
batches[batch_id] = {}
batches[batch_id]["layer"] = output_layer
tmp_partial_output_path = (
tempdir / f"{output_path.stem}_{batch_id}{output_path.suffix}"
)
batches[batch_id]["tmp_partial_output_path"] = tmp_partial_output_path
# Now we have everything to format sql statement
sql_stmt = sql_template.format(
geometrycolumn=input_layerinfo.geometrycolumn,
columns_to_select_str=formatted_column_strings.columns,
input_layer=processing_params.batches[batch_id]["layer"],
batch_filter=processing_params.batches[batch_id]["batch_filter"],
)
# Make sure no NULL geoms are outputted...
if filter_null_geoms is True:
sql_stmt = f"""
SELECT sub.*
FROM
( {sql_stmt}
) sub
WHERE sub.geom IS NOT NULL"""
batches[batch_id]["sql_stmt"] = sql_stmt
# Remark: this temp file doesn't need spatial index
translate_info = _ogr_util.VectorTranslateInfo(
input_path=processing_params.batches[batch_id]["path"],
output_path=tmp_partial_output_path,
output_layer=output_layer,
sql_stmt=sql_stmt,
sql_dialect="SQLITE",
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
options={"LAYER_CREATION.SPATIAL_INDEX": False},
)
future = calculate_pool.submit(
_ogr_util.vector_translate_by_info, info=translate_info
)
future_to_batch_id[future] = batch_id
            # Loop until all parallel processes are ready, and process each one
            # as soon as it is ready.
            # Calculating can be done in parallel, but only one process can write
            # to the same file at a time.
for future in futures.as_completed(future_to_batch_id):
try:
_ = future.result()
except Exception as ex:
batch_id = future_to_batch_id[future]
logger.exception(f"Error executing {batches[batch_id]}")
raise Exception(f"Error executing {batches[batch_id]}") from ex
# Start copy of the result to a common file
# Remark: give higher priority, because this is the slowest factor
batch_id = future_to_batch_id[future]
tmp_partial_output_path = batches[batch_id]["tmp_partial_output_path"]
if tmp_partial_output_path.exists():
gfo.append_to(
src=tmp_partial_output_path,
dst=tmp_output_path,
dst_layer=output_layer,
create_spatial_index=False,
)
gfo.remove(tmp_partial_output_path)
else:
logger.debug(f"Result file {tmp_partial_output_path} was empty")
# Log the progress and prediction speed
nb_done += 1
_general_util.report_progress(
start_time,
nb_done,
len(batches),
operation_name,
nb_parallel=nb_parallel,
)
# Round up and clean up
# Now create spatial index and move to output location
if tmp_output_path.exists():
gfo.create_spatial_index(path=tmp_output_path, layer=output_layer)
output_path.parent.mkdir(parents=True, exist_ok=True)
gfo.move(tmp_output_path, output_path)
else:
logger.debug(f"Result of {operation_name} was empty!")
finally:
# Clean tmp dir
shutil.rmtree(tempdir)
logger.info(f"Processing ready, took {datetime.now()-start_time}!")
################################################################################
# Operations on two layers
################################################################################
def clip(
input_path: Path,
clip_path: Path,
output_path: Path,
input_layer: Optional[str] = None,
input_columns: Optional[List[str]] = None,
input_columns_prefix: str = "",
clip_layer: Optional[str] = None,
output_layer: Optional[str] = None,
explodecollections: bool = False,
output_with_spatial_index: bool = True,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Init
# In the query, important to only extract the geometry types that are expected
input_layer_info = gfo.get_layerinfo(input_path, input_layer)
primitivetypeid = input_layer_info.geometrytype.to_primitivetype.value
# If the input type is not point, force the output type to multi,
    # because clip can cause e.g. polygons to be split into multipolygons...
force_output_geometrytype = input_layer_info.geometrytype
if force_output_geometrytype is not GeometryType.POINT:
force_output_geometrytype = input_layer_info.geometrytype.to_multitype
# Prepare sql template for this operation
# Remarks:
    # - ST_intersection(geometry, NULL) gives NULL as result! -> hence the CASE
    # - using a WITH clause instead of an inline view is a lot faster
    # - WHERE geom IS NOT NULL to avoid rows with a NULL geom, as they give issues in later operations
sql_template = f"""
SELECT * FROM (
WITH layer2_unioned AS (
SELECT layer1.rowid AS layer1_rowid
,ST_union(layer2.{{input2_geometrycolumn}}) AS geom
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy AND layer1tree.maxy >= layer2tree.miny
AND ST_Intersects(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) = 1
AND ST_Touches(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) = 0
GROUP BY layer1.rowid
)
SELECT CASE WHEN layer2_unioned.geom IS NULL THEN NULL
ELSE ST_CollectionExtract(
ST_intersection(layer1.{{input1_geometrycolumn}},
layer2_unioned.geom), {primitivetypeid})
END as geom
{{layer1_columns_prefix_alias_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN layer2_unioned ON layer1.rowid = layer2_unioned.layer1_rowid
WHERE 1=1
{{batch_filter}}
)
WHERE geom IS NOT NULL
AND ST_NPoints(geom) > 0
-- ST_CollectionExtract outputs empty, but not NULL geoms in spatialite 4.3
"""
# Go!
return _two_layer_vector_operation(
input1_path=input_path,
input2_path=clip_path,
output_path=output_path,
sql_template=sql_template,
operation_name="clip",
input1_layer=input_layer,
input1_columns=input_columns,
input1_columns_prefix=input_columns_prefix,
input2_layer=clip_layer,
input2_columns=None,
output_layer=output_layer,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
output_with_spatial_index=output_with_spatial_index,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def erase(
input_path: Path,
erase_path: Path,
output_path: Path,
input_layer: Optional[str] = None,
input_columns: Optional[List[str]] = None,
input_columns_prefix: str = "",
erase_layer: Optional[str] = None,
output_layer: Optional[str] = None,
explodecollections: bool = False,
output_with_spatial_index: bool = True,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Init
# In the query, important to only extract the geometry types that are expected
input_layer_info = gfo.get_layerinfo(input_path, input_layer)
primitivetypeid = input_layer_info.geometrytype.to_primitivetype.value
# If the input type is not point, force the output type to multi,
    # because erase can cause e.g. polygons to be split into multipolygons...
force_output_geometrytype = input_layer_info.geometrytype
if force_output_geometrytype is not GeometryType.POINT:
force_output_geometrytype = input_layer_info.geometrytype.to_multitype
# Prepare sql template for this operation
# Remarks:
    # - ST_difference(geometry, NULL) gives NULL as result! -> hence the CASE
    # - using a WITH clause instead of an inline view is a lot faster
    # - WHERE geom IS NOT NULL to avoid rows with a NULL geom, as they give issues in later operations
sql_template = f"""
SELECT * FROM (
WITH layer2_unioned AS (
SELECT layer1.rowid AS layer1_rowid
,ST_union(layer2.{{input2_geometrycolumn}}) AS geom
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy AND layer1tree.maxy >= layer2tree.miny
AND ST_Intersects(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) = 1
AND ST_Touches(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) = 0
GROUP BY layer1.rowid
)
SELECT CASE WHEN layer2_unioned.geom IS NULL THEN layer1.{{input1_geometrycolumn}}
ELSE ST_CollectionExtract(ST_difference(layer1.{{input1_geometrycolumn}}, layer2_unioned.geom), {primitivetypeid})
END as geom
{{layer1_columns_prefix_alias_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
LEFT JOIN layer2_unioned ON layer1.rowid = layer2_unioned.layer1_rowid
WHERE 1=1
{{batch_filter}}
)
WHERE geom IS NOT NULL
AND ST_NPoints(geom) > 0
-- ST_CollectionExtract outputs empty, but not NULL geoms in spatialite 4.3
"""
# Go!
return _two_layer_vector_operation(
input1_path=input_path,
input2_path=erase_path,
output_path=output_path,
sql_template=sql_template,
operation_name="erase",
input1_layer=input_layer,
input1_columns=input_columns,
input1_columns_prefix=input_columns_prefix,
input2_layer=erase_layer,
output_layer=output_layer,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
output_with_spatial_index=output_with_spatial_index,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
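# Clarifying note on erase(): because of the LEFT JOIN and the CASE in the query
# above, layer1 features that do not intersect anything in layer2 are kept
# unchanged, while intersecting features get the difference geometry.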
def export_by_location(
input_to_select_from_path: Path,
input_to_compare_with_path: Path,
output_path: Path,
min_area_intersect: Optional[float] = None,
area_inters_column_name: Optional[str] = "area_inters",
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
output_layer: Optional[str] = None,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Prepare sql template for this operation
# TODO: test performance difference between the following two queries
sql_template = """
SELECT layer1.{{input1_geometrycolumn}} AS geom
{{layer1_columns_prefix_alias_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree
ON layer1.fid = layer1tree.id
WHERE 1=1
{{batch_filter}}
AND EXISTS (
SELECT 1
FROM {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree
ON layer2.fid = layer2tree.id
WHERE layer1tree.minx <= layer2tree.maxx
AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy
AND layer1tree.maxy >= layer2tree.miny
AND ST_intersects(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 1
AND ST_touches(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 0)
"""
# Calculate intersect area if necessary
area_inters_column_expression = ""
if area_inters_column_name is not None or min_area_intersect is not None:
if area_inters_column_name is None:
area_inters_column_name = "area_inters"
area_inters_column_expression = f""",ST_area(ST_intersection(ST_union(layer1.{{input1_geometrycolumn}}),
ST_union(layer2.{{input2_geometrycolumn}}))
) as {area_inters_column_name}"""
# Prepare sql template for this operation
sql_template = f"""
SELECT ST_union(layer1.{{input1_geometrycolumn}}) as geom
{{layer1_columns_prefix_str}}
{area_inters_column_expression}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree
ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree
ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx
AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy
AND layer1tree.maxy >= layer2tree.miny
AND ST_Intersects(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 1
AND ST_Touches(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 0
GROUP BY layer1.rowid {{layer1_columns_prefix_str}}
"""
# Filter on intersect area if necessary
if min_area_intersect is not None:
sql_template = f"""
SELECT sub.*
FROM
( {sql_template}
) sub
WHERE sub.{area_inters_column_name} >= {min_area_intersect}"""
# Go!
input_layer_info = gfo.get_layerinfo(input_to_select_from_path, input1_layer)
return _two_layer_vector_operation(
input1_path=input_to_select_from_path,
input2_path=input_to_compare_with_path,
output_path=output_path,
sql_template=sql_template,
operation_name="export_by_location",
input1_layer=input1_layer,
input1_columns=input1_columns,
input2_layer=input2_layer,
input2_columns=input2_columns,
output_layer=output_layer,
force_output_geometrytype=input_layer_info.geometrytype,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def export_by_distance(
input_to_select_from_path: Path,
input_to_compare_with_path: Path,
output_path: Path,
max_distance: float,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input2_layer: Optional[str] = None,
output_layer: Optional[str] = None,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Prepare sql template for this operation
sql_template = f"""
SELECT geom
{{layer1_columns_prefix_alias_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
WHERE 1=1
{{batch_filter}}
AND EXISTS (
SELECT 1
FROM {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE (layer1tree.minx-{max_distance}) <= layer2tree.maxx
AND (layer1tree.maxx+{max_distance}) >= layer2tree.minx
AND (layer1tree.miny-{max_distance}) <= layer2tree.maxy
AND (layer1tree.maxy+{max_distance}) >= layer2tree.miny
AND ST_distance(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) <= {max_distance})"""
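    # Clarifying note: the rtree bounding boxes above are expanded by max_distance on
    # every side, so candidate pairs are pre-filtered cheaply before the exact
    # ST_distance test is evaluated.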
input_layer_info = gfo.get_layerinfo(input_to_select_from_path, input1_layer)
# Go!
return _two_layer_vector_operation(
input1_path=input_to_select_from_path,
input2_path=input_to_compare_with_path,
output_path=output_path,
sql_template=sql_template,
operation_name="export_by_distance",
input1_layer=input1_layer,
input1_columns=input1_columns,
input2_layer=input2_layer,
output_layer=output_layer,
force_output_geometrytype=input_layer_info.geometrytype,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def intersection(
input1_path: Path,
input2_path: Path,
output_path: Path,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# In the query, important to only extract the geometry types that are expected
# TODO: test for geometrycollection, line, point,...
input1_layer_info = gfo.get_layerinfo(input1_path, input1_layer)
input2_layer_info = gfo.get_layerinfo(input2_path, input2_layer)
primitivetype_to_extract = PrimitiveType(
min(
input1_layer_info.geometrytype.to_primitivetype.value,
input2_layer_info.geometrytype.to_primitivetype.value,
)
)
# For the output file, if output is going to be polygon or linestring, force
# MULTI variant to evade ugly warnings
force_output_geometrytype = primitivetype_to_extract.to_multitype
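    # For example (assuming the usual SpatiaLite primitive type numbering, where
    # 1=point, 2=linestring and 3=polygon): intersecting a MULTIPOLYGON layer with a
    # LINESTRING layer extracts primitive type 2, so the output is forced to
    # MULTILINESTRING.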
# Prepare sql template for this operation
sql_template = f"""
SELECT sub.geom
{{layer1_columns_from_subselect_str}}
{{layer2_columns_from_subselect_str}}
FROM
( SELECT ST_CollectionExtract(
ST_Intersection(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}),
{primitivetype_to_extract.value}) as geom
{{layer1_columns_prefix_alias_str}}
{{layer2_columns_prefix_alias_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy AND layer1tree.maxy >= layer2tree.miny
AND ST_Intersects(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) = 1
AND ST_Touches(layer1.{{input1_geometrycolumn}}, layer2.{{input2_geometrycolumn}}) = 0
) sub
WHERE sub.geom IS NOT NULL
"""
# Go!
return _two_layer_vector_operation(
input1_path=input1_path,
input2_path=input2_path,
output_path=output_path,
sql_template=sql_template,
operation_name="intersection",
input1_layer=input1_layer,
input1_columns=input1_columns,
input1_columns_prefix=input1_columns_prefix,
input2_layer=input2_layer,
input2_columns=input2_columns,
input2_columns_prefix=input2_columns_prefix,
output_layer=output_layer,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def join_by_location(
input1_path: Path,
input2_path: Path,
output_path: Path,
spatial_relations_query: str = "intersects is True",
discard_nonmatching: bool = True,
min_area_intersect: Optional[float] = None,
area_inters_column_name: Optional[str] = None,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Prepare sql template for this operation
# Prepare intersection area columns/filter
area_inters_column_expression = ""
area_inters_column_in_output = ""
area_inters_column_0_in_output = ""
area_inters_filter = ""
if area_inters_column_name is not None or min_area_intersect is not None:
if area_inters_column_name is not None:
area_inters_column_name_touse = area_inters_column_name
area_inters_column_in_output = f',"{area_inters_column_name}"'
area_inters_column_0_in_output = f',0 AS "{area_inters_column_name}"'
else:
area_inters_column_name_touse = "area_inters"
area_inters_column_expression = f',ST_area(ST_intersection(sub_filter.geom, sub_filter.l2_geom)) as "{area_inters_column_name_touse}"'
if min_area_intersect is not None:
area_inters_filter = f'WHERE sub_area."{area_inters_column_name_touse}" >= {min_area_intersect}'
# Prepare spatial relations filter
if spatial_relations_query != "intersects is True":
        # joining should only be possible on features that at least have an
        # interaction! So, add "intersects is True" to the query to avoid errors!
spatial_relations_query = f"({spatial_relations_query}) and intersects is True"
spatial_relations_filter = _prepare_spatial_relations_filter(
spatial_relations_query
)
# Prepare sql template
#
    # Remark: use "LIMIT -1 OFFSET 0" to prevent the sqlite query optimizer from
    #     "flattening" the subquery, as that makes checking the spatial
    #     relations (using ST_RelateMatch) very slow!
sql_template = f"""
WITH layer1_relations_filtered AS (
SELECT sub_area.*
FROM (
SELECT sub_filter.*
{area_inters_column_expression}
FROM (
SELECT layer1.{{input1_geometrycolumn}} as geom
,layer1.fid l1_fid
,layer2.{{input2_geometrycolumn}} as l2_geom
{{layer1_columns_prefix_alias_str}}
{{layer2_columns_prefix_alias_str}}
,ST_relate(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}
) as spatial_relation
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx
AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy
AND layer1tree.maxy >= layer2tree.miny
LIMIT -1 OFFSET 0
) sub_filter
WHERE {spatial_relations_filter.format(spatial_relation="sub_filter.spatial_relation")}
LIMIT -1 OFFSET 0
) sub_area
{area_inters_filter}
)
SELECT sub.geom
{{layer1_columns_from_subselect_str}}
{{layer2_columns_from_subselect_str}}
,sub.spatial_relation
{area_inters_column_in_output}
FROM layer1_relations_filtered sub """
# If a left join is asked, add all features from layer1 that weren't
# matched.
if discard_nonmatching is False:
sql_template = f"""
{sql_template}
UNION ALL
SELECT layer1.{{input1_geometrycolumn}} as geom
{{layer1_columns_prefix_alias_str}}
{{layer2_columns_prefix_alias_null_str}}
,NULL as spatial_relation
{area_inters_column_0_in_output}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
WHERE 1=1
{{batch_filter}}
AND layer1.fid NOT IN (
SELECT l1_fid FROM layer1_relations_filtered) """
# Go!
input1_layer_info = gfo.get_layerinfo(input1_path, input1_layer)
return _two_layer_vector_operation(
input1_path=input1_path,
input2_path=input2_path,
output_path=output_path,
sql_template=sql_template,
operation_name="join_by_location",
input1_layer=input1_layer,
input1_columns=input1_columns,
input1_columns_prefix=input1_columns_prefix,
input2_layer=input2_layer,
input2_columns=input2_columns,
input2_columns_prefix=input2_columns_prefix,
output_layer=output_layer,
force_output_geometrytype=input1_layer_info.geometrytype,
explodecollections=explodecollections,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def _prepare_spatial_relations_filter(query: str) -> str:
named_spatial_relations = {
# "disjoint": ["FF*FF****"],
"equals": ["TFFF*FFF*"],
"touches": ["FT*******", "F**T*****", "F***T****"],
"within": ["T*F**F***"],
"overlaps": ["T*T***T**", "1*T***T**"],
"crosses": ["T*T******", "T*****T**", "0********"],
"intersects": ["T********", "*T*******", "***T*****", "****T****"],
"contains": ["T*****FF*"],
"covers": ["T*****FF*", "*T****FF*", "***T**FF*", "****T*FF*"],
"coveredby": ["T*F**F***", "*TF**F***", "**FT*F***", "**F*TF***"],
}
# Parse query and replace things that need to be replaced
import re
query_tokens = re.split("([ =()])", query)
query_tokens_prepared = []
nb_unclosed_brackets = 0
for token in query_tokens:
if token == "":
continue
elif token in [" ", "\n", "\t", "and", "or"]:
query_tokens_prepared.append(token)
elif token == "(":
nb_unclosed_brackets += 1
query_tokens_prepared.append(token)
elif token == ")":
nb_unclosed_brackets -= 1
query_tokens_prepared.append(token)
elif token == "is":
query_tokens_prepared.append("=")
elif token == "True":
query_tokens_prepared.append("1")
elif token == "False":
query_tokens_prepared.append("0")
elif token in named_spatial_relations:
match_list = []
for spatial_relation in named_spatial_relations[token]:
match = (
f"ST_RelateMatch({{spatial_relation}}, '{spatial_relation}') = 1"
)
match_list.append(match)
query_tokens_prepared.append(f"({' or '.join(match_list)})")
elif len(token) == 9 and re.fullmatch("^[FT012*]+$", token) is not None:
token_prepared = f"ST_RelateMatch({{spatial_relation}}, '{token}')"
query_tokens_prepared.append(token_prepared)
else:
raise ValueError(
f"Unexpected token in query (query is case sensitive!): {token}"
)
# If there are unclosed brackets, raise
if nb_unclosed_brackets > 0:
raise ValueError(f"not all brackets are closed in query {query}")
elif nb_unclosed_brackets < 0:
raise ValueError(f"more closing brackets than opening ones in query {query}")
result = f"({''.join(query_tokens_prepared)})"
return result
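# For example, _prepare_spatial_relations_filter("intersects is True") returns,
# modulo whitespace, the filter
#   ((ST_RelateMatch({spatial_relation}, 'T********') = 1
#     or ST_RelateMatch({spatial_relation}, '*T*******') = 1
#     or ST_RelateMatch({spatial_relation}, '***T*****') = 1
#     or ST_RelateMatch({spatial_relation}, '****T****') = 1) = 1)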
def join_nearest(
input1_path: Path,
input2_path: Path,
output_path: Path,
nb_nearest: int,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# Init some things...
# Because there is preprocessing done in this function, check output path
# here already
if output_path.exists() and force is False:
logger.info(f"Stop join_nearest: output exists already {output_path}")
return
if input1_layer is None:
input1_layer = gfo.get_only_layer(input1_path)
if input2_layer is None:
input2_layer = gfo.get_only_layer(input2_path)
# Prepare input files
# To use knn index, the input layers need to be in sqlite file format
# (not a .gpkg!), so prepare this
if input1_path == input2_path and GeofileType(input1_path) == GeofileType.SQLite:
# Input files already ok...
input1_tmp_path = input1_path
input1_tmp_layer = input1_layer
input2_tmp_path = input2_path
input2_tmp_layer = input2_layer
else:
        # Put input1 layer in sqlite gfo...
tempdir = _io_util.create_tempdir("geofileops/join_nearest")
input1_tmp_path = tempdir / "both_input_layers.sqlite"
input1_tmp_layer = "input1_layer"
gfo.convert(
src=input1_path,
src_layer=input1_layer,
dst=input1_tmp_path,
dst_layer=input1_tmp_layer,
)
# Add input2 layer to sqlite gfo...
input2_tmp_path = input1_tmp_path
input2_tmp_layer = "input2_layer"
gfo.append_to(
src=input2_path,
src_layer=input2_layer,
dst=input2_tmp_path,
dst_layer=input2_tmp_layer,
)
# Remark: the 2 input layers need to be in one file!
sql_template = f"""
SELECT layer1.{{input1_geometrycolumn}} as geom
{{layer1_columns_prefix_alias_str}}
{{layer2_columns_prefix_alias_str}}
,k.pos, k.distance
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input2_databasename}}.knn k
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
ON layer2.rowid = k.fid
WHERE k.f_table_name = '{{input2_layer}}'
AND k.f_geometry_column = '{{input2_geometrycolumn}}'
AND k.ref_geometry = layer1.{{input1_geometrycolumn}}
AND k.max_items = {nb_nearest}
{{batch_filter}}
"""
input1_layer_info = gfo.get_layerinfo(input1_path, input1_layer)
# Go!
return _two_layer_vector_operation(
input1_path=input1_tmp_path,
input2_path=input2_tmp_path,
output_path=output_path,
sql_template=sql_template,
operation_name="join_nearest",
input1_layer=input1_tmp_layer,
input1_columns=input1_columns,
input1_columns_prefix=input1_columns_prefix,
input2_layer=input2_tmp_layer,
input2_columns=input2_columns,
input2_columns_prefix=input2_columns_prefix,
output_layer=output_layer,
force_output_geometrytype=input1_layer_info.geometrytype,
explodecollections=explodecollections,
use_ogr=True,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def select_two_layers(
input1_path: Path,
input2_path: Path,
output_path: Path,
sql_stmt: str,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
force_output_geometrytype: Optional[GeometryType] = None,
explodecollections: bool = False,
nb_parallel: int = 1,
batchsize: int = -1,
force: bool = False,
):
# Go!
return _two_layer_vector_operation(
input1_path=input1_path,
input2_path=input2_path,
output_path=output_path,
sql_template=sql_stmt,
operation_name="select_two_layers",
input1_layer=input1_layer,
input1_columns=input1_columns,
input1_columns_prefix=input1_columns_prefix,
input2_layer=input2_layer,
input2_columns=input2_columns,
input2_columns_prefix=input2_columns_prefix,
output_layer=output_layer,
force_output_geometrytype=force_output_geometrytype,
explodecollections=explodecollections,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def split(
input1_path: Path,
input2_path: Path,
output_path: Path,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
output_with_spatial_index: bool = True,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
# In the query, important to only extract the geometry types that are
# expected, so the primitive type of input1_layer
# TODO: test for geometrycollection, line, point,...
input1_layer_info = gfo.get_layerinfo(input1_path, input1_layer)
primitivetype_to_extract = input1_layer_info.geometrytype.to_primitivetype
# For the output file, force MULTI variant to evade ugly warnings
force_output_geometrytype = primitivetype_to_extract.to_multitype
# Prepare sql template for this operation
sql_template = f"""
SELECT * FROM
( WITH layer2_unioned AS (
SELECT layer1.rowid AS layer1_rowid
,ST_union(layer2.{{input2_geometrycolumn}}) AS geom
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx
AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy
AND layer1tree.maxy >= layer2tree.miny
AND ST_Intersects(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 1
AND ST_Touches(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 0
GROUP BY layer1.rowid
)
SELECT ST_CollectionExtract(
ST_intersection(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}),
{primitivetype_to_extract.value}) as geom
{{layer1_columns_prefix_alias_str}}
{{layer2_columns_prefix_alias_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
JOIN {{input1_databasename}}."rtree_{{input1_layer}}_{{input1_geometrycolumn}}" layer1tree ON layer1.fid = layer1tree.id
JOIN {{input2_databasename}}."{{input2_layer}}" layer2
JOIN {{input2_databasename}}."rtree_{{input2_layer}}_{{input2_geometrycolumn}}" layer2tree ON layer2.fid = layer2tree.id
WHERE 1=1
{{batch_filter}}
AND layer1tree.minx <= layer2tree.maxx
AND layer1tree.maxx >= layer2tree.minx
AND layer1tree.miny <= layer2tree.maxy
AND layer1tree.maxy >= layer2tree.miny
AND ST_Intersects(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 1
AND ST_Touches(layer1.{{input1_geometrycolumn}},
layer2.{{input2_geometrycolumn}}) = 0
UNION ALL
SELECT CASE WHEN layer2_unioned.geom IS NULL THEN layer1.{{input1_geometrycolumn}}
ELSE ST_CollectionExtract(
ST_difference(layer1.{{input1_geometrycolumn}},
layer2_unioned.geom),
{primitivetype_to_extract.value})
END as geom
{{layer1_columns_prefix_alias_str}}
{{layer2_columns_prefix_alias_null_str}}
FROM {{input1_databasename}}."{{input1_layer}}" layer1
LEFT JOIN layer2_unioned ON layer1.rowid = layer2_unioned.layer1_rowid
WHERE 1=1
{{batch_filter}}
)
WHERE geom IS NOT NULL
AND ST_NPoints(geom) > 0 -- ST_CollectionExtract outputs empty, but not NULL geoms in spatialite 4.3
"""
# Go!
return _two_layer_vector_operation(
input1_path=input1_path,
input2_path=input2_path,
output_path=output_path,
sql_template=sql_template,
operation_name="split",
input1_layer=input1_layer,
input1_columns=input1_columns,
input1_columns_prefix=input1_columns_prefix,
input2_layer=input2_layer,
input2_columns=input2_columns,
input2_columns_prefix=input2_columns_prefix,
output_layer=output_layer,
force_output_geometrytype=force_output_geometrytype,
explodecollections=explodecollections,
output_with_spatial_index=output_with_spatial_index,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
def symmetric_difference(
input1_path: Path,
input2_path: Path,
output_path: Path,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
    # A symmetric difference can be simulated by erasing input2 from input1
    # and then appending the result of erasing input1 from input2...
    # Because both erase calculations are written to temp files,
    # we need to do some additional init + checks here...
if force is False and output_path.exists():
return
if output_layer is None:
output_layer = gfo.get_default_layer(output_path)
tempdir = _io_util.create_tempdir("geofileops/symmdiff")
try:
# First erase input2 from input1 to a temporary output file
erase1_output_path = tempdir / "layer1_erase_layer2_output.gpkg"
erase(
input_path=input1_path,
erase_path=input2_path,
output_path=erase1_output_path,
input_layer=input1_layer,
input_columns=input1_columns,
input_columns_prefix=input1_columns_prefix,
erase_layer=input2_layer,
output_layer=output_layer,
explodecollections=explodecollections,
output_with_spatial_index=False,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
if input2_columns is None or len(input2_columns) > 0:
input2_info = gfo.get_layerinfo(input2_path)
columns_to_add = (
input2_columns if input2_columns is not None else input2_info.columns
)
for column in columns_to_add:
gfo.add_column(
erase1_output_path,
name=f"{input2_columns_prefix}{column}",
type=input2_info.columns[column].gdal_type,
)
# Now erase input1 from input2 to another temporary output file
erase2_output_path = tempdir / "layer2_erase_layer1_output.gpkg"
erase(
input_path=input2_path,
erase_path=input1_path,
output_path=erase2_output_path,
input_layer=input2_layer,
input_columns=input2_columns,
input_columns_prefix=input2_columns_prefix,
erase_layer=input1_layer,
output_layer=output_layer,
explodecollections=explodecollections,
output_with_spatial_index=False,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
# Now append
_append_to_nolock(
src=erase2_output_path,
dst=erase1_output_path,
src_layer=output_layer,
dst_layer=output_layer,
)
# Create spatial index
gfo.create_spatial_index(path=erase1_output_path, layer=output_layer)
# Now we are ready to move the result to the final spot...
if output_path.exists():
gfo.remove(output_path)
gfo.move(erase1_output_path, output_path)
finally:
shutil.rmtree(tempdir)
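# In set terms: symmetric_difference(A, B) = (A - B) UNION (B - A), which is exactly
# what the two erase() calls above compute before appending and indexing the result.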
def union(
input1_path: Path,
input2_path: Path,
output_path: Path,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
    # A union can be simulated by splitting input1 with input2 and then
    # appending the result of erasing input1 from input2...
    # Because the calculations in split and erase are written to temp files,
    # we need to do some additional init + checks here...
if force is False and output_path.exists():
return
if output_layer is None:
output_layer = gfo.get_default_layer(output_path)
tempdir = _io_util.create_tempdir("geofileops/union")
try:
# First split input1 with input2 to a temporary output gfo...
split_output_path = tempdir / "split_output.gpkg"
split(
input1_path=input1_path,
input2_path=input2_path,
output_path=split_output_path,
input1_layer=input1_layer,
input1_columns=input1_columns,
input1_columns_prefix=input1_columns_prefix,
input2_layer=input2_layer,
input2_columns=input2_columns,
input2_columns_prefix=input2_columns_prefix,
output_layer=output_layer,
explodecollections=explodecollections,
output_with_spatial_index=False,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
# Now erase input1 from input2 to another temporary output gfo...
erase_output_path = tempdir / "erase_output.gpkg"
erase(
input_path=input2_path,
erase_path=input1_path,
output_path=erase_output_path,
input_layer=input2_layer,
input_columns=input2_columns,
input_columns_prefix=input2_columns_prefix,
erase_layer=input1_layer,
output_layer=output_layer,
explodecollections=explodecollections,
output_with_spatial_index=False,
nb_parallel=nb_parallel,
batchsize=batchsize,
force=force,
)
# Now append
_append_to_nolock(
src=erase_output_path,
dst=split_output_path,
src_layer=output_layer,
dst_layer=output_layer,
)
# Create spatial index
gfo.create_spatial_index(path=split_output_path, layer=output_layer)
# Now we are ready to move the result to the final spot...
if output_path.exists():
gfo.remove(output_path)
gfo.move(split_output_path, output_path)
finally:
shutil.rmtree(tempdir)
def _two_layer_vector_operation(
input1_path: Path,
input2_path: Path,
output_path: Path,
sql_template: str,
operation_name: str,
input1_layer: Optional[str] = None,
input1_columns: Optional[List[str]] = None,
input1_columns_prefix: str = "l1_",
input2_layer: Optional[str] = None,
input2_columns: Optional[List[str]] = None,
input2_columns_prefix: str = "l2_",
output_layer: Optional[str] = None,
explodecollections: bool = False,
force_output_geometrytype: Optional[GeometryType] = None,
output_with_spatial_index: bool = True,
use_ogr: bool = False,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
"""
Executes an operation that needs 2 input files.
Args:
input1_path (str): the file to export features from
input2_path (str): the file to check intersections with
output_path (str): output file
        sql_template (str): the sql template to execute per batch; placeholders
            such as {input1_databasename} and {batch_filter} are filled out per batch.
        operation_name (str): name of the operation, used for logging and the temp dir.
        input1_layer (str, optional): input1 layer name. Defaults to None: use the only layer.
        input1_columns (List[str], optional): columns of input1 to keep in the output.
        input1_columns_prefix (str, optional): prefix for the input1 column names in the output.
        input2_layer (str, optional): input2 layer name. Defaults to None: use the only layer.
        input2_columns (List[str], optional): columns of input2 to keep in the output.
        input2_columns_prefix (str, optional): prefix for the input2 column names in the output.
        output_layer (str, optional): output layer name. Defaults to None: use a default layer name.
        explodecollections (bool, optional): Explode collections in output. Defaults to False.
        force_output_geometrytype (GeometryType, optional): geometry type to force the output to. Defaults to None.
        output_with_spatial_index (bool, optional): True to create a spatial index on the output. Defaults to True.
        use_ogr (bool, optional): If True, ogr is used to do the processing.
            In this case different input files (input1_path, input2_path) are
            NOT supported. If False, sqlite3 is used directly.
            Defaults to False.
        nb_parallel (int, optional): number of parallel workers to use. Defaults to -1: determine automatically.
        batchsize (int, optional): indicative number of rows to process per
            batch. A smaller batch size, possibly in combination with a
            smaller nb_parallel, will reduce the memory usage.
            Defaults to -1: (try to) determine optimal size automatically.
        force (bool, optional): overwrite the output file if it exists already. Defaults to False.
    Raises:
        Exception: if an input path doesn't exist or the parameter combination is invalid.
"""
# Init
if not input1_path.exists():
raise Exception(
f"Error {operation_name}: input1_path doesn't exist: {input1_path}"
)
if not input2_path.exists():
raise Exception(
f"Error {operation_name}: input2_path doesn't exist: {input2_path}"
)
if use_ogr is True and input1_path != input2_path:
raise Exception(
f"Error {operation_name}: if use_ogr is True, input1_path must equal input2_path!"
)
if output_path.exists():
if force is False:
logger.info(f"Stop {operation_name}: output exists already {output_path}")
return
else:
gfo.remove(output_path)
# Check if spatialite is properly installed to execute this query
_sqlite_util.check_runtimedependencies()
# Init layer info
start_time = datetime.now()
if input1_layer is None:
input1_layer = gfo.get_only_layer(input1_path)
if input2_layer is None:
input2_layer = gfo.get_only_layer(input2_path)
if output_layer is None:
output_layer = gfo.get_default_layer(output_path)
tempdir = _io_util.create_tempdir(f"geofileops/{operation_name}")
# Use get_layerinfo to check if the input files are valid
gfo.get_layerinfo(input1_path, input1_layer)
gfo.get_layerinfo(input2_path, input2_layer)
# Prepare output filename
tmp_output_path = tempdir / output_path.name
tmp_output_path.parent.mkdir(exist_ok=True, parents=True)
gfo.remove(tmp_output_path)
try:
# Prepare tmp files/batches
logger.info(
f"Prepare input (params) for {operation_name} with tempdir: {tempdir}"
)
processing_params = _prepare_processing_params(
input1_path=input1_path,
input1_layer=input1_layer,
input1_layer_alias="layer1",
input2_path=input2_path,
input2_layer=input2_layer,
tempdir=tempdir,
nb_parallel=nb_parallel,
batchsize=batchsize,
convert_to_spatialite_based=True,
)
if processing_params is None or processing_params.batches is None:
return
# Prepare column names,... to format the select
# Format column strings for use in select
assert processing_params.input1_path is not None
input1_tmp_layerinfo = gfo.get_layerinfo(
processing_params.input1_path, processing_params.input1_layer
)
input1_columnstrings = format_column_strings(
columns_specified=input1_columns,
columns_available=input1_tmp_layerinfo.columns,
table_alias="layer1",
columnname_prefix=input1_columns_prefix,
)
assert processing_params.input2_path is not None
input2_tmp_layerinfo = gfo.get_layerinfo(
processing_params.input2_path, processing_params.input2_layer
)
input2_columnstrings = format_column_strings(
columns_specified=input2_columns,
columns_available=input2_tmp_layerinfo.columns,
table_alias="layer2",
columnname_prefix=input2_columns_prefix,
)
# Check input crs'es
if input1_tmp_layerinfo.crs != input2_tmp_layerinfo.crs:
logger.warning(
f"input1 has a different crs than input2: \n\tinput1: {input1_tmp_layerinfo.crs} \n\tinput2: {input2_tmp_layerinfo.crs}"
)
# Calculate
# Processing in threads is 2x faster for small datasets (on Windows)
        calculate_in_threads = input1_tmp_layerinfo.featurecount <= 100
logger.info(
f"Start {operation_name} in {processing_params.nb_parallel} parallel workers"
)
with _general_util.PooledExecutorFactory(
threadpool=calculate_in_threads,
max_workers=processing_params.nb_parallel,
initializer=_general_util.initialize_worker(),
) as calculate_pool:
# Start looping
batches = {}
future_to_batch_id = {}
for batch_id in processing_params.batches:
batches[batch_id] = {}
batches[batch_id]["layer"] = output_layer
tmp_partial_output_path = (
tempdir / f"{output_path.stem}_{batch_id}.gpkg"
)
batches[batch_id]["tmp_partial_output_path"] = tmp_partial_output_path
# Keep input1_tmp_layer and input2_tmp_layer for backwards
# compatibility
sql_stmt = sql_template.format(
input1_databasename="{input1_databasename}",
input2_databasename="{input2_databasename}",
layer1_columns_from_subselect_str=input1_columnstrings.columns_from_subselect,
layer1_columns_prefix_alias_str=input1_columnstrings.columns_prefix_alias,
layer1_columns_prefix_str=input1_columnstrings.columns_prefix,
input1_layer=processing_params.batches[batch_id]["layer"],
input1_tmp_layer=processing_params.batches[batch_id]["layer"],
input1_geometrycolumn=input1_tmp_layerinfo.geometrycolumn,
layer2_columns_from_subselect_str=input2_columnstrings.columns_from_subselect,
layer2_columns_prefix_alias_str=input2_columnstrings.columns_prefix_alias,
layer2_columns_prefix_str=input2_columnstrings.columns_prefix,
layer2_columns_prefix_alias_null_str=input2_columnstrings.columns_prefix_alias_null,
input2_layer=processing_params.input2_layer,
input2_tmp_layer=processing_params.input2_layer,
input2_geometrycolumn=input2_tmp_layerinfo.geometrycolumn,
batch_filter=processing_params.batches[batch_id]["batch_filter"],
)
batches[batch_id]["sqlite_stmt"] = sql_stmt
# Remark: this temp file doesn't need spatial index
if use_ogr is False:
# Use an aggressive speedy sqlite profile
future = calculate_pool.submit(
_sqlite_util.create_table_as_sql,
input1_path=processing_params.batches[batch_id]["path"],
input1_layer=processing_params.batches[batch_id]["layer"],
input2_path=processing_params.input2_path,
output_path=tmp_partial_output_path,
sql_stmt=sql_stmt,
output_layer=output_layer,
output_geometrytype=force_output_geometrytype,
create_spatial_index=False,
profile=_sqlite_util.SqliteProfile.SPEED,
)
future_to_batch_id[future] = batch_id
else:
# Use ogr to run the query
# * input2 path (= using attach) doesn't seem to work
# * ogr doesn't fill out database names, so do it now
sql_stmt = sql_stmt.format(
input1_databasename=processing_params.input1_databasename,
input2_databasename=processing_params.input2_databasename,
)
future = calculate_pool.submit(
_ogr_util.vector_translate,
input_path=processing_params.batches[batch_id]["path"],
output_path=tmp_partial_output_path,
sql_stmt=sql_stmt,
output_layer=output_layer,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
options={"LAYER_CREATION.SPATIAL_INDEX": False},
)
future_to_batch_id[future] = batch_id
# Loop till all parallel processes are ready, but process each one
# that is ready already
nb_done = 0
_general_util.report_progress(
start_time,
nb_done,
len(processing_params.batches),
operation_name,
processing_params.nb_parallel,
)
for future in futures.as_completed(future_to_batch_id):
try:
# Get the result
result = future.result()
if result is not None:
logger.debug(result)
# Start copy of the result to a common file
batch_id = future_to_batch_id[future]
# If the calculate gave results, copy to output
tmp_partial_output_path = batches[batch_id][
"tmp_partial_output_path"
]
if (
tmp_partial_output_path.exists()
and tmp_partial_output_path.stat().st_size > 0
):
fileops._append_to_nolock(
src=tmp_partial_output_path,
dst=tmp_output_path,
explodecollections=explodecollections,
force_output_geometrytype=force_output_geometrytype,
create_spatial_index=False,
)
else:
logger.debug(f"Result file {tmp_partial_output_path} was empty")
# Cleanup tmp partial file
gfo.remove(tmp_partial_output_path, missing_ok=True)
except Exception as ex:
batch_id = future_to_batch_id[future]
raise Exception(f"Error executing {batches[batch_id]}") from ex
# Log the progress and prediction speed
nb_done += 1
_general_util.report_progress(
start_time=start_time,
nb_done=nb_done,
nb_todo=len(processing_params.batches),
operation=operation_name,
nb_parallel=processing_params.nb_parallel,
)
# Round up and clean up
# Now create spatial index and move to output location
if tmp_output_path.exists():
if output_with_spatial_index is True:
gfo.create_spatial_index(path=tmp_output_path, layer=output_layer)
if tmp_output_path != output_path:
output_path.parent.mkdir(parents=True, exist_ok=True)
gfo.move(tmp_output_path, output_path)
else:
logger.debug(f"Result of {operation_name} was empty!")
logger.info(f"{operation_name} ready, took {datetime.now()-start_time}!")
except Exception:
gfo.remove(output_path)
gfo.remove(tmp_output_path)
raise
finally:
shutil.rmtree(tempdir)
class ProcessingParams:
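    """
    Holds the preprocessed inputs (paths, layers, database names) and the
    batch definitions used by _two_layer_vector_operation.
    """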
def __init__(
self,
input1_path: Optional[Path] = None,
input1_layer: Optional[str] = None,
input1_databasename: Optional[str] = None,
input2_path: Optional[Path] = None,
input2_layer: Optional[str] = None,
input2_databasename: Optional[str] = None,
nb_parallel: int = -1,
batches: Optional[dict] = None,
):
self.input1_path = input1_path
self.input1_layer = input1_layer
self.input1_databasename = input1_databasename
self.input2_path = input2_path
self.input2_layer = input2_layer
self.input2_databasename = input2_databasename
self.nb_parallel = nb_parallel
self.batches = batches
def _prepare_processing_params(
input1_path: Path,
input1_layer: str,
tempdir: Path,
convert_to_spatialite_based: bool,
nb_parallel: int,
batchsize: int = -1,
input1_layer_alias: Optional[str] = None,
input2_path: Optional[Path] = None,
input2_layer: Optional[str] = None,
) -> Optional[ProcessingParams]:
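    """
    Prepares the ProcessingParams for a two-layer operation: determines the
    number of parallel workers and batches, converts the inputs to a
    spatialite-based file format if needed and calculates the rowid range to
    treat per batch. Returns None if the input1 layer contains no rows.
    """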
# Init
returnvalue = ProcessingParams(nb_parallel=nb_parallel)
input1_layerinfo = gfo.get_layerinfo(input1_path, input1_layer)
if input1_layerinfo.featurecount == 0:
logger.info(
f"input1 layer contains 0 rows, file: {input1_path}, layer: {input1_layer}"
)
return None
# Determine the optimal number of parallel processes + batches
if returnvalue.nb_parallel == -1:
# If no batch size specified, put at least 100 rows in a batch
if batchsize <= 0:
min_rows_per_batch = 100
else:
# If batchsize is specified, use the batch size
min_rows_per_batch = batchsize
max_parallel = max(int(input1_layerinfo.featurecount / min_rows_per_batch), 1)
returnvalue.nb_parallel = min(multiprocessing.cpu_count(), max_parallel)
# Determine optimal number of batches
# Remark: especially for 'select' operation, if nb_parallel is 1
# nb_batches should be 1 (select might give wrong results)
if returnvalue.nb_parallel > 1:
# Limit number of rows processed in parallel to limit memory use
if batchsize > 0:
max_rows_parallel = batchsize * returnvalue.nb_parallel
else:
max_rows_parallel = 200000
# Adapt number of batches to max_rows_parallel
if input1_layerinfo.featurecount > max_rows_parallel:
                # If more rows than can be handled simultaneously in parallel
nb_batches = int(
input1_layerinfo.featurecount
/ (max_rows_parallel / returnvalue.nb_parallel)
)
elif batchsize > 0:
                # If a batchsize is specified, try to honor it
nb_batches = returnvalue.nb_parallel
else:
# If no batchsize specified, add some batches to reduce impact of possible
# "unbalanced" batches regarding needed processing time.
nb_batches = returnvalue.nb_parallel * 2
elif batchsize > 0:
nb_batches = math.ceil(input1_layerinfo.featurecount / batchsize)
else:
nb_batches = 1
# Prepare input files for the calculation
returnvalue.input1_layer = input1_layer
returnvalue.input2_layer = input2_layer
if convert_to_spatialite_based is False:
returnvalue.input1_path = input1_path
returnvalue.input2_path = input2_path
else:
# Check if the input files are of the correct geofiletype
input1_geofiletype = GeofileType(input1_path)
input2_geofiletype = None
if input2_path is not None:
input2_geofiletype = GeofileType(input2_path)
# If input files are of the same format + are spatialite compatible,
# just use them
if input1_geofiletype.is_spatialite_based and (
input2_geofiletype is None or input1_geofiletype == input2_geofiletype
):
returnvalue.input1_path = input1_path
else:
# If not ok, copy the input layer to gpkg
returnvalue.input1_path = tempdir / f"{input1_path.stem}.gpkg"
gfo.convert(
src=input1_path,
src_layer=input1_layer,
dst=returnvalue.input1_path,
dst_layer=returnvalue.input1_layer,
)
if input2_path is not None and input2_geofiletype is not None:
if (
input2_geofiletype == input1_geofiletype
and input2_geofiletype.is_spatialite_based
):
returnvalue.input2_path = input2_path
else:
# If not spatialite compatible, copy the input layer to gpkg
returnvalue.input2_path = tempdir / f"{input2_path.stem}.gpkg"
gfo.convert(
src=input2_path,
src_layer=input2_layer,
dst=returnvalue.input2_path,
dst_layer=returnvalue.input2_layer,
)
# Fill out the database names to use in the sql statements
returnvalue.input1_databasename = "main"
if input2_path is None or input1_path == input2_path:
returnvalue.input2_databasename = returnvalue.input1_databasename
else:
returnvalue.input2_databasename = "input2"
# Prepare batches to process
# Get column names and info
layer1_info = gfo.get_layerinfo(returnvalue.input1_path, returnvalue.input1_layer)
# Check number of batches + appoint nb rows to batches
nb_rows_input_layer = layer1_info.featurecount
if nb_batches > int(nb_rows_input_layer / 10):
nb_batches = max(int(nb_rows_input_layer / 10), 1)
batches = {}
if nb_batches == 1:
# If only one batch, no filtering is needed
batches[0] = {}
batches[0]["layer"] = returnvalue.input1_layer
batches[0]["path"] = returnvalue.input1_path
batches[0]["batch_filter"] = ""
else:
# Determine the min_rowid and max_rowid
sql_stmt = f'''SELECT MIN(rowid) as min_rowid, MAX(rowid) as max_rowid
FROM "{layer1_info.name}"'''
batch_info_df = gfo.read_file_sql(
path=returnvalue.input1_path, sql_stmt=sql_stmt
)
min_rowid = pd.to_numeric(batch_info_df["min_rowid"][0])
max_rowid = pd.to_numeric(batch_info_df["max_rowid"][0])
# Determine the exact batches to use
if ((max_rowid - min_rowid) / nb_rows_input_layer) < 1.1:
# If the rowid's are quite consecutive, use an imperfect, but
# fast distribution in batches
batch_info_list = []
nb_rows_per_batch = round(nb_rows_input_layer / nb_batches)
offset = 0
offset_per_batch = round((max_rowid - min_rowid) / nb_batches)
for batch_id in range(nb_batches):
start_rowid = offset
if batch_id < (nb_batches - 1):
# End rowid for this batch is the next start_rowid - 1
end_rowid = offset + offset_per_batch - 1
else:
# For the last batch, take the max_rowid so no rowid's are
# 'lost' due to rounding errors
end_rowid = max_rowid
batch_info_list.append(
(batch_id, nb_rows_per_batch, start_rowid, end_rowid)
)
offset += offset_per_batch
batch_info_df = pd.DataFrame(
batch_info_list, columns=["id", "nb_rows", "start_rowid", "end_rowid"]
)
else:
# The rowids are not consecutive, so determine the optimal rowid
            # ranges for each batch so each batch has the same number of elements
# Remark: this might take some seconds for larger datasets!
sql_stmt = f"""
SELECT batch_id AS id
,COUNT(*) AS nb_rows
,MIN(rowid) AS start_rowid
,MAX(rowid) AS end_rowid
FROM (SELECT rowid
,NTILE({nb_batches}) OVER (ORDER BY rowid) batch_id
FROM "{layer1_info.name}"
)
GROUP BY batch_id;
"""
batch_info_df = gfo.read_file_sql(
path=returnvalue.input1_path, sql_stmt=sql_stmt
)
# Prepare the layer alias to use in the batch filter
layer_alias_d = ""
if input1_layer_alias is not None:
layer_alias_d = f"{input1_layer_alias}."
# Now loop over all batch ranges to build up the necessary filters
for batch_info in batch_info_df.itertuples():
# Fill out the batch properties
batches[batch_info.id] = {}
batches[batch_info.id]["layer"] = returnvalue.input1_layer
batches[batch_info.id]["path"] = returnvalue.input1_path
# The batch filter
if batch_info.id < nb_batches:
batches[batch_info.id][
"batch_filter"
] = f"AND ({layer_alias_d}rowid >= {batch_info.start_rowid} AND {layer_alias_d}rowid <= {batch_info.end_rowid}) "
else:
batches[batch_info.id][
"batch_filter"
] = f"AND {layer_alias_d}rowid >= {batch_info.start_rowid} "
# No use starting more processes than the number of batches...
if len(batches) < returnvalue.nb_parallel:
returnvalue.nb_parallel = len(batches)
returnvalue.batches = batches
return returnvalue
@dataclass
class FormattedColumnStrings:
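    """Container for the column string variants used in the sql templates."""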
columns: str
columns_prefix: str
columns_prefix_alias: str
columns_prefix_alias_null: str
columns_from_subselect: str
def format_column_strings(
columns_specified: Optional[List[str]],
columns_available: Iterable[str],
table_alias: str = "",
columnname_prefix: str = "",
) -> FormattedColumnStrings:
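    """
    Formats the columns to the different string variants (quoted, prefixed,
    aliased,...) needed in the sql templates. Raises an exception if
    columns_specified contains columns that are not in columns_available.
    """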
# First prepare the actual column list to use
if columns_specified is not None:
        # Case-insensitive check if columns_specified contains columns not in columns_available...
columns_available_upper = [column.upper() for column in columns_available]
missing_columns = [
col
for col in columns_specified
if (col.upper() not in columns_available_upper)
]
if len(missing_columns) > 0:
raise Exception(
f"Error, columns_specified contains following columns not in columns_available: {missing_columns}. Existing columns: {columns_available}"
)
# Create column list to keep in the casing of the original columns
columns_specified_upper = [column.upper() for column in columns_specified]
columns = [
col for col in columns_available if (col.upper() in columns_specified_upper)
]
else:
columns = columns_available
# Now format the column strings
columns_quoted_str = ""
columns_prefix_alias_null_str = ""
columns_prefix_str = ""
columns_prefix_alias_str = ""
columns_from_subselect_str = ""
if len(columns) > 0:
if table_alias is not None and table_alias != "":
table_alias_d = f"{table_alias}."
else:
table_alias_d = ""
columns_quoted = [f'"{column}"' for column in columns]
columns_quoted_str = "," + ", ".join(columns_quoted)
columns_prefix_alias_null = [
f'NULL "{columnname_prefix}{column}"' for column in columns
]
columns_prefix_alias_null_str = "," + ", ".join(columns_prefix_alias_null)
columns_prefix = [f'{table_alias_d}"{column}"' for column in columns]
columns_prefix_str = "," + ", ".join(columns_prefix)
columns_table_aliased_column_aliased = [
f'{table_alias_d}"{column}" "{columnname_prefix}{column}"'
for column in columns
]
columns_prefix_alias_str = "," + ", ".join(columns_table_aliased_column_aliased)
columns_from_subselect = [
f'sub."{columnname_prefix}{column}"' for column in columns
]
columns_from_subselect_str = "," + ", ".join(columns_from_subselect)
return FormattedColumnStrings(
columns=columns_quoted_str,
columns_prefix=columns_prefix_str,
columns_prefix_alias=columns_prefix_alias_str,
columns_prefix_alias_null=columns_prefix_alias_null_str,
columns_from_subselect=columns_from_subselect_str,
)
def dissolve_singlethread(
input_path: Path,
output_path: Path,
groupby_columns: Optional[Iterable[str]] = None,
agg_columns: Optional[dict] = None,
explodecollections: bool = False,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
force: bool = False,
):
"""
    Dissolve the features in the input layer, optionally grouped on the
    groupby_columns and with optional aggregations via agg_columns.
    Remark: this is not a parallelized version!
"""
# Init
start_time = datetime.now()
if output_path.exists():
if force is False:
logger.info(f"Stop dissolve: Output exists already {output_path}")
return
else:
gfo.remove(output_path)
# Check layer names
if input_layer is None:
input_layer = gfo.get_only_layer(input_path)
if output_layer is None:
output_layer = gfo.get_default_layer(output_path)
# Use get_layerinfo to check if the layer definition is OK
layerinfo = gfo.get_layerinfo(input_path, input_layer)
# Prepare the strings regarding groupby_columns to use in the select statement.
if groupby_columns is not None:
# Because the query uses a subselect, the groupby columns need to be prefixed.
columns_with_prefix = [f'layer."{column}"' for column in groupby_columns]
groupby_columns_str = ", ".join(columns_with_prefix)
groupby_columns_for_groupby_str = groupby_columns_str
groupby_columns_for_select_str = ", " + groupby_columns_str
else:
# Even if no groupby is provided, we still need to use a groupby clause,
# otherwise ST_union doesn't seem to work.
groupby_columns_for_groupby_str = "'1'"
groupby_columns_for_select_str = ""
# Prepare the strings regarding agg_columns to use in the select statement.
agg_columns_str = ""
if agg_columns is not None:
# Prepare some lists for later use
columns_upper_dict = {col.upper(): col for col in layerinfo.columns}
groupby_columns_upper_dict = {}
if groupby_columns is not None:
groupby_columns_upper_dict = {col.upper(): col for col in groupby_columns}
# Start preparation of agg_columns_str
if "json" in agg_columns:
agg_columns_str = ""
# If the columns specified are None, take all columns that are not in
# groupby_columns
if agg_columns["json"] is None:
for column in layerinfo.columns:
if column.upper() not in groupby_columns_upper_dict:
agg_columns_str += f"'{column}', layer.{column}"
else:
for column in agg_columns["json"]:
agg_columns_str += f"'{column}', layer.{column}"
agg_columns_str = f", json_object({agg_columns_str}) as json"
elif "columns" in agg_columns:
for agg_column in agg_columns["columns"]:
# Init
distinct_str = ""
extra_param_str = ""
# Prepare aggregation keyword.
if agg_column["agg"].lower() in [
"count",
"sum",
"min",
"max",
"median",
]:
aggregation_str = agg_column["agg"]
elif agg_column["agg"].lower() in ["mean", "avg"]:
aggregation_str = "avg"
elif agg_column["agg"].lower() == "concat":
aggregation_str = "group_concat"
if "sep" in agg_column:
extra_param_str = f", '{agg_column['sep']}'"
else:
raise ValueError(
f"Error: aggregation {agg_column['agg']} is not supported!"
)
# If distinct is specified, add the distinct keyword
if "distinct" in agg_column and agg_column["distinct"] is True:
distinct_str = "DISTINCT "
# Prepare column name string.
# Make sure the columns name casing is same as input file
column_str = (
f'layer."{columns_upper_dict[agg_column["column"].upper()]}"'
)
                # Now put everything together
agg_columns_str += f', {aggregation_str}({distinct_str}{column_str}{extra_param_str}) AS "{agg_column["as"]}"'
# Now prepare the sql statement
# Remark: calculating the area in the enclosing selects halves the
# processing time
# The operation to run on the geometry
operation = f"ST_union(layer.{layerinfo.geometrycolumn})"
force_output_geometrytype = None
# If the input is a linestring, also apply st_linemerge().
    # If not, the individual lines are just concatenated together and common
    # points are not removed, resulting in the original separate lines again
# if explodecollections is True.
if layerinfo.geometrytype.to_primitivetype == PrimitiveType.LINESTRING:
operation = f"ST_LineMerge({operation})"
if explodecollections is True:
force_output_geometrytype = GeometryType.LINESTRING
sql_stmt = f"""
SELECT {operation} AS geom
{groupby_columns_for_select_str}
{agg_columns_str}
FROM "{input_layer}" layer
GROUP BY {groupby_columns_for_groupby_str}"""
_ogr_util.vector_translate(
input_path=input_path,
output_path=output_path,
output_layer=output_layer,
sql_stmt=sql_stmt,
sql_dialect="SQLITE",
force_output_geometrytype=force_output_geometrytype,
explodecollections=explodecollections,
)
logger.info(f"Processing ready, took {datetime.now()-start_time}!")
'''
def dissolve_cardsheets(
input_path: Path,
input_cardsheets_path: Path,
output_path: Path,
groupby_columns: Optional[List[str]] = None,
explodecollections: bool = False,
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False):
# Init
start_time = datetime.now()
if output_path.exists():
if force is False:
logger.info(f"Stop dissolve_cardsheets: output exists already {output_path}, so stop")
return
else:
gfo.remove(output_path)
if nb_parallel == -1:
nb_parallel = multiprocessing.cpu_count()
# Get input data to temp gpkg file
tempdir = io_util.create_tempdir("geofileops/dissolve_cardsheets")
input_tmp_path = tempdir / "input_layers.gpkg"
if(input_path.suffix.lower() == '.gpkg'):
logger.info(f"Copy {input_path} to {input_tmp_path}")
gfo.copy(input_path, input_tmp_path)
logger.debug("Copy ready")
else:
# Remark: this temp file doesn't need spatial index
logger.info(f"Copy {input_path} to {input_tmp_path} using ogr2ogr")
ogr_util.vector_translate(
input_path=input_path,
output_path=input_tmp_path,
create_spatial_index=False,
output_layer=input_layer
)
logger.debug("Copy ready")
if input_layer is None:
input_layer = gfo.get_only_layer(input_tmp_path)
if output_layer is None:
output_layer = gfo.get_default_layer(output_path)
# Prepare tmp files
# Prepare the strings to use in the select statement
if groupby_columns is not None:
# Because the query uses a subselect, the groupby columns need to be prefixed
columns_with_prefix = [f"layer.{column}" for column in groupby_columns]
groupby_columns_str = ", ".join(columns_with_prefix)
groupby_columns_for_groupby_str = groupby_columns_str
groupby_columns_for_select_str = ", " + groupby_columns_str
else:
# Even if no groupby is provided, we still need to use a groupby clause, otherwise
# ST_union doesn't seem to work
groupby_columns_for_groupby_str = "'1'"
groupby_columns_for_select_str = ""
# Load the cardsheets we want the dissolve to be bound on
cardsheets_gdf = gfo.read_file(input_cardsheets_path)
try:
# Start calculation of intersections in parallel
logger.info(f"Start calculation of dissolves in file {input_tmp_path} to partial files")
tmp_output_path = tempdir / output_path.name
# Processing in threads is 2x faster for small datasets (on Windows)
input_layerinfo = gfo.get_layerinfo(input_path)
calculate_in_threads = True if input_layerinfo.featurecount <= 100 else False
with _general_util.PooledExecutorFactory(
threadpool=calculate_in_threads,
max_workers=nb_parallel,
initializer=_general_util.initialize_worker()) as calculate_pool:
translate_jobs = {}
future_to_batch_id = {}
nb_batches = len(cardsheets_gdf)
for batch_id, cardsheet in enumerate(cardsheets_gdf.itertuples()):
translate_jobs[batch_id] = {}
translate_jobs[batch_id]['layer'] = output_layer
output_tmp_partial_path = tempdir / f"{output_path.stem}_{batch_id}{output_path.suffix}"
translate_jobs[batch_id]['tmp_partial_output_path'] = output_tmp_partial_path
# Remarks:
# - calculating the area in the enclosing selects halves the processing time
# - ST_union() gives same performance as ST_unaryunion(ST_collect())!
bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax = cardsheet.geometry.bounds
bbox_wkt = f"POLYGON (({bbox_xmin} {bbox_ymin}, {bbox_xmax} {bbox_ymin}, {bbox_xmax} {bbox_ymax}, {bbox_xmin} {bbox_ymax}, {bbox_xmin} {bbox_ymin}))"
sql_stmt = f"""
SELECT ST_union(ST_intersection(layer.geom, ST_GeomFromText('{bbox_wkt}'))) AS geom{groupby_columns_for_select_str}
FROM {input_layer} layer
JOIN rtree_{input_layer}_geom t_tree ON layer.fid = t_tree.id
WHERE t_tree.minx <= {bbox_xmax} AND t_tree.maxx >= {bbox_xmin}
AND t_tree.miny <= {bbox_ymax} AND t_tree.maxy >= {bbox_ymin}
AND ST_Intersects(layer.geom, ST_GeomFromText('{bbox_wkt}')) = 1
AND ST_Touches(layer.geom, ST_GeomFromText('{bbox_wkt}')) = 0
GROUP BY {groupby_columns_for_groupby_str}"""
# Force geometrytype to multipolygon, because normal polygons easily are turned into
# multipolygon if self-touching...
force_output_geometrytype = GeometryType.MULTIPOLYGON
translate_jobs[batch_id]['sqlite_stmt'] = sql_stmt
translate_description = f"Async dissolve {batch_id} of {nb_batches}, bounds: {cardsheet.geometry.bounds}"
# Remark: this temp file doesn't need spatial index
translate_info = ogr_util.VectorTranslateInfo(
input_path=input_tmp_path,
output_path=output_tmp_partial_path,
translate_description=translate_description,
output_layer=output_layer,
#clip_bounds=cardsheet.geometry.bounds,
sql_stmt=sql_stmt,
sql_dialect='SQLITE',
append=True,
update=True,
explodecollections=True,
force_output_geometrytype=force_output_geometrytype
)
future = calculate_pool.submit(
ogr_util.vector_translate_by_info,
info=translate_info)
future_to_batch_id[future] = batch_id
# Loop till all parallel processes are ready, but process each one that is
# ready already
for future in futures.as_completed(future_to_batch_id):
try:
_ = future.result()
# Start copy of the result to a common file
# Remark: give higher priority, because this is the slowest factor
batch_id = future_to_batch_id[future]
# If the calculate gave results, copy to output
tmp_partial_output_path = translate_jobs[batch_id]['tmp_partial_output_path']
if tmp_partial_output_path.exists():
translate_description = f"Copy result {batch_id} of {nb_batches} to {output_layer}"
translate_info = ogr_util.VectorTranslateInfo(
input_path=tmp_partial_output_path,
output_path=tmp_output_path,
translate_description=translate_description,
output_layer=output_layer,
transaction_size=200000,
append=True,
update=True,
create_spatial_index=False,
force_output_geometrytype=GeometryType.MULTIPOLYGON,
)
ogr_util.vector_translate_by_info(info=translate_info)
gfo.remove(tmp_partial_output_path)
except Exception as ex:
batch_id = future_to_batch_id[future]
#calculate_pool.shutdown()
logger.error(f"Error executing {translate_jobs[batch_id]}: {ex}")
# Round up and clean up
# Now create spatial index and move to output location
gfo.create_spatial_index(path=tmp_output_path, layer=output_layer)
gfo.move(tmp_output_path, output_path)
finally:
# Clean tmp dir
shutil.rmtree(tempdir)
logger.info(f"Processing ready, took {datetime.now()-start_time}!")
'''
|
[
"shutil.rmtree",
"multiprocessing.cpu_count",
"geofileops.convert",
"geofileops.move",
"pandas.DataFrame",
"re.fullmatch",
"datetime.datetime.now",
"geofileops.get_default_layer",
"re.split",
"math.ceil",
"geofileops.GeofileType",
"geofileops.append_to",
"geofileops.get_layerinfo",
"geofileops.read_file_sql",
"geofileops.get_only_layer",
"pandas.to_numeric",
"concurrent.futures.as_completed",
"geofileops.create_spatial_index",
"geofileops.fileops._append_to_nolock",
"geofileops.add_column",
"geofileops.remove",
"logging.getLogger"
] |
[((843, 870), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (860, 870), False, 'import logging\n'), ((3538, 3580), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (3555, 3580), True, 'import geofileops as gfo\n'), ((4881, 4923), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (4898, 4923), True, 'import geofileops as gfo\n'), ((7748, 7790), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (7765, 7790), True, 'import geofileops as gfo\n'), ((9384, 9407), 'geofileops.GeofileType', 'GeofileType', (['input_path'], {}), '(input_path)\n', (9395, 9407), False, 'from geofileops import GeofileType, GeometryType, PrimitiveType\n'), ((11837, 11879), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (11854, 11879), True, 'import geofileops as gfo\n'), ((12847, 12861), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12859, 12861), False, 'from datetime import datetime\n'), ((13579, 13621), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (13596, 13621), True, 'import geofileops as gfo\n'), ((20441, 20483), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (20458, 20483), True, 'import geofileops as gfo\n'), ((24373, 24415), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (24390, 24415), True, 'import geofileops as gfo\n'), ((31699, 31757), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_to_select_from_path', 'input1_layer'], {}), '(input_to_select_from_path, input1_layer)\n', (31716, 31757), True, 'import geofileops as gfo\n'), ((33928, 33986), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_to_select_from_path', 'input1_layer'], {}), '(input_to_select_from_path, input1_layer)\n', (33945, 33986), True, 'import geofileops as gfo\n'), ((35212, 35256), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input1_path', 'input1_layer'], {}), '(input1_path, input1_layer)\n', (35229, 35256), True, 'import geofileops as gfo\n'), ((35281, 35325), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input2_path', 'input2_layer'], {}), '(input2_path, input2_layer)\n', (35298, 35325), True, 'import geofileops as gfo\n'), ((43169, 43213), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input1_path', 'input1_layer'], {}), '(input1_path, input1_layer)\n', (43186, 43213), True, 'import geofileops as gfo\n'), ((44647, 44674), 're.split', 're.split', (['"""([ =()])"""', 'query'], {}), "('([ =()])', query)\n", (44655, 44674), False, 'import re\n'), ((49356, 49400), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input1_path', 'input1_layer'], {}), '(input1_path, input1_layer)\n', (49373, 49400), True, 'import geofileops as gfo\n'), ((52203, 52247), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input1_path', 'input1_layer'], {}), '(input1_path, input1_layer)\n', (52220, 52247), True, 'import geofileops as gfo\n'), ((66394, 66408), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (66406, 66408), False, 'from datetime import datetime\n'), ((66801, 66845), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input1_path', 'input1_layer'], {}), '(input1_path, 
input1_layer)\n', (66818, 66845), True, 'import geofileops as gfo\n'), ((66850, 66894), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input2_path', 'input2_layer'], {}), '(input2_path, input2_layer)\n', (66867, 66894), True, 'import geofileops as gfo\n'), ((67041, 67068), 'geofileops.remove', 'gfo.remove', (['tmp_output_path'], {}), '(tmp_output_path)\n', (67051, 67068), True, 'import geofileops as gfo\n'), ((77910, 77954), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input1_path', 'input1_layer'], {}), '(input1_path, input1_layer)\n', (77927, 77954), True, 'import geofileops as gfo\n'), ((82192, 82260), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['returnvalue.input1_path', 'returnvalue.input1_layer'], {}), '(returnvalue.input1_path, returnvalue.input1_layer)\n', (82209, 82260), True, 'import geofileops as gfo\n'), ((89796, 89810), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (89808, 89810), False, 'from datetime import datetime\n'), ((90288, 90330), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (90305, 90330), True, 'import geofileops as gfo\n'), ((7047, 7077), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['output_path'], {}), '(output_path)\n', (7064, 7077), True, 'import geofileops as gfo\n'), ((13119, 13149), 'geofileops.get_only_layer', 'gfo.get_only_layer', (['input_path'], {}), '(input_path)\n', (13137, 13149), True, 'import geofileops as gfo\n'), ((13202, 13236), 'geofileops.get_default_layer', 'gfo.get_default_layer', (['output_path'], {}), '(output_path)\n', (13223, 13236), True, 'import geofileops as gfo\n'), ((19593, 19615), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (19606, 19615), False, 'import shutil\n'), ((47296, 47327), 'geofileops.get_only_layer', 'gfo.get_only_layer', (['input1_path'], {}), '(input1_path)\n', (47314, 47327), True, 'import geofileops as gfo\n'), ((47380, 47411), 'geofileops.get_only_layer', 'gfo.get_only_layer', (['input2_path'], {}), '(input2_path)\n', (47398, 47411), True, 'import geofileops as gfo\n'), ((48065, 48170), 'geofileops.convert', 'gfo.convert', ([], {'src': 'input1_path', 'src_layer': 'input1_layer', 'dst': 'input1_tmp_path', 'dst_layer': 'input1_tmp_layer'}), '(src=input1_path, src_layer=input1_layer, dst=input1_tmp_path,\n dst_layer=input1_tmp_layer)\n', (48076, 48170), True, 'import geofileops as gfo\n'), ((48363, 48470), 'geofileops.append_to', 'gfo.append_to', ([], {'src': 'input2_path', 'src_layer': 'input2_layer', 'dst': 'input2_tmp_path', 'dst_layer': 'input2_tmp_layer'}), '(src=input2_path, src_layer=input2_layer, dst=input2_tmp_path,\n dst_layer=input2_tmp_layer)\n', (48376, 48470), True, 'import geofileops as gfo\n'), ((57862, 57896), 'geofileops.get_default_layer', 'gfo.get_default_layer', (['output_path'], {}), '(output_path)\n', (57883, 57896), True, 'import geofileops as gfo\n'), ((59883, 60001), 'geofileops.fileops._append_to_nolock', '_append_to_nolock', ([], {'src': 'erase2_output_path', 'dst': 'erase1_output_path', 'src_layer': 'output_layer', 'dst_layer': 'output_layer'}), '(src=erase2_output_path, dst=erase1_output_path, src_layer\n =output_layer, dst_layer=output_layer)\n', (59900, 60001), False, 'from geofileops.fileops import _append_to_nolock\n'), ((60096, 60165), 'geofileops.create_spatial_index', 'gfo.create_spatial_index', ([], {'path': 'erase1_output_path', 'layer': 'output_layer'}), '(path=erase1_output_path, layer=output_layer)\n', (60120, 60165), True, 'import geofileops as 
gfo\n'), ((60311, 60352), 'geofileops.move', 'gfo.move', (['erase1_output_path', 'output_path'], {}), '(erase1_output_path, output_path)\n', (60319, 60352), True, 'import geofileops as gfo\n'), ((60375, 60397), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (60388, 60397), False, 'import shutil\n'), ((61288, 61322), 'geofileops.get_default_layer', 'gfo.get_default_layer', (['output_path'], {}), '(output_path)\n', (61309, 61322), True, 'import geofileops as gfo\n'), ((62876, 62992), 'geofileops.fileops._append_to_nolock', '_append_to_nolock', ([], {'src': 'erase_output_path', 'dst': 'split_output_path', 'src_layer': 'output_layer', 'dst_layer': 'output_layer'}), '(src=erase_output_path, dst=split_output_path, src_layer=\n output_layer, dst_layer=output_layer)\n', (62893, 62992), False, 'from geofileops.fileops import _append_to_nolock\n'), ((63087, 63155), 'geofileops.create_spatial_index', 'gfo.create_spatial_index', ([], {'path': 'split_output_path', 'layer': 'output_layer'}), '(path=split_output_path, layer=output_layer)\n', (63111, 63155), True, 'import geofileops as gfo\n'), ((63301, 63341), 'geofileops.move', 'gfo.move', (['split_output_path', 'output_path'], {}), '(split_output_path, output_path)\n', (63309, 63341), True, 'import geofileops as gfo\n'), ((63364, 63386), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (63377, 63386), False, 'import shutil\n'), ((66461, 66492), 'geofileops.get_only_layer', 'gfo.get_only_layer', (['input1_path'], {}), '(input1_path)\n', (66479, 66492), True, 'import geofileops as gfo\n'), ((66545, 66576), 'geofileops.get_only_layer', 'gfo.get_only_layer', (['input2_path'], {}), '(input2_path)\n', (66563, 66576), True, 'import geofileops as gfo\n'), ((66629, 66663), 'geofileops.get_default_layer', 'gfo.get_default_layer', (['output_path'], {}), '(output_path)\n', (66650, 66663), True, 'import geofileops as gfo\n'), ((67922, 68007), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['processing_params.input1_path', 'processing_params.input1_layer'], {}), '(processing_params.input1_path, processing_params.input1_layer\n )\n', (67939, 68007), True, 'import geofileops as gfo\n'), ((68370, 68455), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['processing_params.input2_path', 'processing_params.input2_layer'], {}), '(processing_params.input2_path, processing_params.input2_layer\n )\n', (68387, 68455), True, 'import geofileops as gfo\n'), ((76694, 76716), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (76707, 76716), False, 'import shutil\n'), ((80318, 80342), 'geofileops.GeofileType', 'GeofileType', (['input1_path'], {}), '(input1_path)\n', (80329, 80342), False, 'from geofileops import GeofileType, GeometryType, PrimitiveType\n'), ((82963, 83029), 'geofileops.read_file_sql', 'gfo.read_file_sql', ([], {'path': 'returnvalue.input1_path', 'sql_stmt': 'sql_stmt'}), '(path=returnvalue.input1_path, sql_stmt=sql_stmt)\n', (82980, 83029), True, 'import geofileops as gfo\n'), ((83072, 83116), 'pandas.to_numeric', 'pd.to_numeric', (["batch_info_df['min_rowid'][0]"], {}), "(batch_info_df['min_rowid'][0])\n", (83085, 83116), True, 'import pandas as pd\n'), ((83137, 83181), 'pandas.to_numeric', 'pd.to_numeric', (["batch_info_df['max_rowid'][0]"], {}), "(batch_info_df['max_rowid'][0])\n", (83150, 83181), True, 'import pandas as pd\n'), ((90090, 90120), 'geofileops.get_only_layer', 'gfo.get_only_layer', (['input_path'], {}), '(input_path)\n', (90108, 90120), True, 'import geofileops as gfo\n'), ((90173, 
90207), 'geofileops.get_default_layer', 'gfo.get_default_layer', (['output_path'], {}), '(output_path)\n', (90194, 90207), True, 'import geofileops as gfo\n'), ((6605, 6628), 'geofileops.GeofileType', 'GeofileType', (['input_path'], {}), '(input_path)\n', (6616, 6628), False, 'from geofileops import GeofileType, GeometryType, PrimitiveType\n'), ((10380, 10422), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input_path', 'input_layer'], {}), '(input_path, input_layer)\n', (10397, 10422), True, 'import geofileops as gfo\n'), ((13492, 13515), 'geofileops.remove', 'gfo.remove', (['output_path'], {}), '(output_path)\n', (13502, 13515), True, 'import geofileops as gfo\n'), ((17691, 17731), 'concurrent.futures.as_completed', 'futures.as_completed', (['future_to_batch_id'], {}), '(future_to_batch_id)\n', (17711, 17731), False, 'from concurrent import futures\n'), ((19282, 19348), 'geofileops.create_spatial_index', 'gfo.create_spatial_index', ([], {'path': 'tmp_output_path', 'layer': 'output_layer'}), '(path=tmp_output_path, layer=output_layer)\n', (19306, 19348), True, 'import geofileops as gfo\n'), ((19427, 19465), 'geofileops.move', 'gfo.move', (['tmp_output_path', 'output_path'], {}), '(tmp_output_path, output_path)\n', (19435, 19465), True, 'import geofileops as gfo\n'), ((47589, 47613), 'geofileops.GeofileType', 'GeofileType', (['input1_path'], {}), '(input1_path)\n', (47600, 47613), False, 'from geofileops import GeofileType, GeometryType, PrimitiveType\n'), ((58743, 58773), 'geofileops.get_layerinfo', 'gfo.get_layerinfo', (['input2_path'], {}), '(input2_path)\n', (58760, 58773), True, 'import geofileops as gfo\n'), ((60279, 60302), 'geofileops.remove', 'gfo.remove', (['output_path'], {}), '(output_path)\n', (60289, 60302), True, 'import geofileops as gfo\n'), ((63269, 63292), 'geofileops.remove', 'gfo.remove', (['output_path'], {}), '(output_path)\n', (63279, 63292), True, 'import geofileops as gfo\n'), ((66214, 66237), 'geofileops.remove', 'gfo.remove', (['output_path'], {}), '(output_path)\n', (66224, 66237), True, 'import geofileops as gfo\n'), ((74023, 74063), 'concurrent.futures.as_completed', 'futures.as_completed', (['future_to_batch_id'], {}), '(future_to_batch_id)\n', (74043, 74063), False, 'from concurrent import futures\n'), ((76599, 76622), 'geofileops.remove', 'gfo.remove', (['output_path'], {}), '(output_path)\n', (76609, 76622), True, 'import geofileops as gfo\n'), ((76631, 76658), 'geofileops.remove', 'gfo.remove', (['tmp_output_path'], {}), '(tmp_output_path)\n', (76641, 76658), True, 'import geofileops as gfo\n'), ((78622, 78649), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (78647, 78649), False, 'import multiprocessing\n'), ((79854, 79906), 'math.ceil', 'math.ceil', (['(input1_layerinfo.featurecount / batchsize)'], {}), '(input1_layerinfo.featurecount / batchsize)\n', (79863, 79906), False, 'import math\n'), ((80446, 80470), 'geofileops.GeofileType', 'GeofileType', (['input2_path'], {}), '(input2_path)\n', (80457, 80470), False, 'from geofileops import GeofileType, GeometryType, PrimitiveType\n'), ((80928, 81050), 'geofileops.convert', 'gfo.convert', ([], {'src': 'input1_path', 'src_layer': 'input1_layer', 'dst': 'returnvalue.input1_path', 'dst_layer': 'returnvalue.input1_layer'}), '(src=input1_path, src_layer=input1_layer, dst=returnvalue.\n input1_path, dst_layer=returnvalue.input1_layer)\n', (80939, 81050), True, 'import geofileops as gfo\n'), ((84281, 84369), 'pandas.DataFrame', 'pd.DataFrame', (['batch_info_list'], {'columns': 
"['id', 'nb_rows', 'start_rowid', 'end_rowid']"}), "(batch_info_list, columns=['id', 'nb_rows', 'start_rowid',\n 'end_rowid'])\n", (84293, 84369), True, 'import pandas as pd\n'), ((85146, 85212), 'geofileops.read_file_sql', 'gfo.read_file_sql', ([], {'path': 'returnvalue.input1_path', 'sql_stmt': 'sql_stmt'}), '(path=returnvalue.input1_path, sql_stmt=sql_stmt)\n', (85163, 85212), True, 'import geofileops as gfo\n'), ((89991, 90014), 'geofileops.remove', 'gfo.remove', (['output_path'], {}), '(output_path)\n', (90001, 90014), True, 'import geofileops as gfo\n'), ((58963, 59086), 'geofileops.add_column', 'gfo.add_column', (['erase1_output_path'], {'name': 'f"""{input2_columns_prefix}{column}"""', 'type': 'input2_info.columns[column].gdal_type'}), "(erase1_output_path, name=f'{input2_columns_prefix}{column}',\n type=input2_info.columns[column].gdal_type)\n", (58977, 59086), True, 'import geofileops as gfo\n'), ((76166, 76232), 'geofileops.create_spatial_index', 'gfo.create_spatial_index', ([], {'path': 'tmp_output_path', 'layer': 'output_layer'}), '(path=tmp_output_path, layer=output_layer)\n', (76190, 76232), True, 'import geofileops as gfo\n'), ((76366, 76404), 'geofileops.move', 'gfo.move', (['tmp_output_path', 'output_path'], {}), '(tmp_output_path, output_path)\n', (76374, 76404), True, 'import geofileops as gfo\n'), ((81589, 81711), 'geofileops.convert', 'gfo.convert', ([], {'src': 'input2_path', 'src_layer': 'input2_layer', 'dst': 'returnvalue.input2_path', 'dst_layer': 'returnvalue.input2_layer'}), '(src=input2_path, src_layer=input2_layer, dst=returnvalue.\n input2_path, dst_layer=returnvalue.input2_layer)\n', (81600, 81711), True, 'import geofileops as gfo\n'), ((18412, 18532), 'geofileops.append_to', 'gfo.append_to', ([], {'src': 'tmp_partial_output_path', 'dst': 'tmp_output_path', 'dst_layer': 'output_layer', 'create_spatial_index': '(False)'}), '(src=tmp_partial_output_path, dst=tmp_output_path, dst_layer=\n output_layer, create_spatial_index=False)\n', (18425, 18532), True, 'import geofileops as gfo\n'), ((18667, 18702), 'geofileops.remove', 'gfo.remove', (['tmp_partial_output_path'], {}), '(tmp_partial_output_path)\n', (18677, 18702), True, 'import geofileops as gfo\n'), ((75331, 75383), 'geofileops.remove', 'gfo.remove', (['tmp_partial_output_path'], {'missing_ok': '(True)'}), '(tmp_partial_output_path, missing_ok=True)\n', (75341, 75383), True, 'import geofileops as gfo\n'), ((95126, 95140), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (95138, 95140), False, 'from datetime import datetime\n'), ((19662, 19676), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19674, 19676), False, 'from datetime import datetime\n'), ((74785, 74989), 'geofileops.fileops._append_to_nolock', 'fileops._append_to_nolock', ([], {'src': 'tmp_partial_output_path', 'dst': 'tmp_output_path', 'explodecollections': 'explodecollections', 'force_output_geometrytype': 'force_output_geometrytype', 'create_spatial_index': '(False)'}), '(src=tmp_partial_output_path, dst=tmp_output_path,\n explodecollections=explodecollections, force_output_geometrytype=\n force_output_geometrytype, create_spatial_index=False)\n', (74810, 74989), False, 'from geofileops import fileops\n'), ((76539, 76553), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (76551, 76553), False, 'from datetime import datetime\n'), ((45787, 45821), 're.fullmatch', 're.fullmatch', (['"""^[FT012*]+$"""', 'token'], {}), "('^[FT012*]+$', token)\n", (45799, 45821), False, 'import re\n')]
|
from urllib.parse import urlparse
from flask import Blueprint, render_template, request, redirect, url_for
from werkzeug.exceptions import NotFound, Forbidden
from .utils import srcf_db_sess as sess
from .utils import parse_domain_name, create_job_maybe_email_and_redirect, find_member
from . import utils, inspect_services
from srcf.controllib import jobs
from srcf.database import Domain
from srcf import domains
import re
import string
bp = Blueprint("member", __name__)
@bp.route('/member')
def home():
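    """Show the member overview page, or redirect to reactivation for inactive accounts."""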
crsid, mem = find_member(allow_inactive=True)
if not mem.user:
return redirect(url_for('member.reactivate'))
inspect_services.lookup_all(mem)
return render_template("member/home.html", member=mem)
@bp.route("/reactivate", methods=["GET", "POST"])
def reactivate():
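    """Let an inactive member request reactivation of their account via a confirmed email address."""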
crsid, mem = find_member(allow_inactive=True)
if mem.user:
raise NotFound
email = None
error = None
if request.method == "POST":
email = request.form.get("email", "").strip()
error = utils.validate_member_email(crsid, email)
if request.method == "POST" and not error:
return create_job_maybe_email_and_redirect(
jobs.Reactivate, member=mem, email=email)
else:
return render_template("member/reactivate.html", member=mem, email=email, error=error)
@bp.route("/member/email", methods=["GET", "POST"])
def update_email_address():
crsid, mem = find_member()
email = mem.email
error = None
if request.method == "POST":
email = request.form.get("email", "").strip()
if mem.email == email:
error = "That's the address we have already."
else:
error = utils.validate_member_email(crsid, email)
if request.method == "POST" and not error:
return create_job_maybe_email_and_redirect(
jobs.UpdateEmailAddress, member=mem, email=email)
else:
return render_template("member/update_email_address.html", member=mem, email=email, error=error)
@bp.route("/member/srcf-email", methods=["GET", "POST"])
def update_email_handler():
crsid, mem = find_member()
mail_handler = mem.mail_handler
if request.method == "POST":
mail_handler = request.form.get("mail_handler", "").strip()
if mem.mail_handler == mail_handler:
# No change requested
return redirect(url_for("member.home"))
if request.method == "POST":
if not request.form.get("confirm", ""):
return render_template("member/update_email_handler_confirm.html", member=mem,
old_mail_handler=mem.mail_handler, mail_handler=mail_handler)
else:
return create_job_maybe_email_and_redirect(
jobs.UpdateMailHandler, member=mem, mail_handler=mail_handler)
else:
return render_template("member/update_email_handler.html", member=mem, mail_handler=mail_handler)
@bp.route("/member/mailinglist", methods=["GET", "POST"])
def create_mailing_list():
crsid, mem = find_member()
listname = ""
error = None
if request.method == "POST":
listname = request.form.get("listname", "").strip()
if not listname:
error = "Please enter a list name."
elif re.search(r"[^a-z0-9_-]", listname):
error = "List names can only contain letters, numbers, hyphens and underscores."
else:
lists = inspect_services.lookup_mailinglists(crsid)
if "{}-{}".format(crsid, listname) in lists:
error = "This mailing list already exists."
if request.method == "POST" and not error:
return create_job_maybe_email_and_redirect(
jobs.CreateUserMailingList, member=mem,
listname=listname)
else:
return render_template("member/create_mailing_list.html", member=mem, listname=listname, error=error)
@bp.route("/member/mailinglist/<listname>/password", methods=["GET", "POST"])
def reset_mailing_list_password(listname):
crsid, mem = find_member()
lists = inspect_services.lookup_mailinglists(crsid)
if listname not in lists:
raise NotFound
if request.method == "POST":
return create_job_maybe_email_and_redirect(
jobs.ResetUserMailingListPassword, member=mem, listname=listname)
else:
return render_template("member/reset_mailing_list_password.html", member=mem, listname=listname)
@bp.route("/member/srcf/password", methods=["GET", "POST"], defaults={"type": "srcf"})
@bp.route("/member/mysql/password", methods=["GET", "POST"], defaults={"type": "mysql"})
@bp.route("/member/postgres/password", methods=["GET", "POST"], defaults={"type": "postgres"})
def reset_password(type):
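    """Reset the member's SRCF, MySQL or PostgreSQL password, depending on the route used."""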
crsid, mem = find_member()
if request.method == "POST":
cls = {"mysql": jobs.ResetMySQLUserPassword,
"postgres": jobs.ResetPostgresUserPassword,
"srcf": jobs.ResetUserPassword}[type]
return create_job_maybe_email_and_redirect(cls, member=mem)
else:
formatted_name = {"mysql": "MySQL",
"postgres": "PostgreSQL",
"srcf": "SRCF"}[type]
web_interface = {"mysql": "phpMyAdmin",
"postgres": "phpPgAdmin",
"srcf": None}[type]
if type == "srcf":
affects = "password-based access to the shell service and SFTP"
else:
affects = "access to " + web_interface + ", as well as any scripts that access databases using your account"
return render_template("member/reset_password.html", member=mem, type=type, name=formatted_name, affects=affects)
@bp.route("/member/mysql/createuser", methods=["GET", "POST"], defaults={"type": "mysql"})
@bp.route("/member/postgres/createuser", methods=["GET", "POST"], defaults={"type": "postgres"})
def create_database_account(type):
crsid, mem = find_member()
if request.method == "POST":
cls = {"mysql": jobs.ResetMySQLUserPassword,
"postgres": jobs.ResetPostgresUserPassword}[type]
return create_job_maybe_email_and_redirect(cls, member=mem)
else:
formatted_name = {"mysql": "MySQL",
"postgres": "PostgreSQL"}[type]
return render_template("member/create_database_account.html", member=mem, type=type, name=formatted_name)
@bp.route("/member/mysql/create", methods=["GET", "POST"], defaults={"type": "mysql"})
@bp.route("/member/postgres/create", methods=["GET", "POST"], defaults={"type": "postgres"})
def create_database(type):
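    """Create a MySQL or PostgreSQL database for the member."""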
crsid, mem = find_member()
if request.method == "POST":
cls = {"mysql": jobs.CreateMySQLUserDatabase,
"postgres": jobs.CreatePostgresUserDatabase}[type]
return create_job_maybe_email_and_redirect(cls, member=mem)
else:
formatted_name = {"mysql": "MySQL",
"postgres": "PostgreSQL"}[type]
inspect = {"mysql": inspect_services.lookup_mysqluser,
"postgres": inspect_services.lookup_pguser}[type]
has_user = inspect(mem.crsid)
return render_template("member/create_database.html", member=mem, type=type, name=formatted_name, user=has_user)
@bp.route("/member/domains/add", methods=["GET", "POST"])
def add_vhost():
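    """Register a custom domain (vhost) for the member's website, after validating and testing it."""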
crsid, mem = find_member()
domain = ""
root = ""
errors = {}
if request.method == "POST":
domain = request.form.get("domain", "").strip()
root = request.form.get("root", "").strip()
if domain:
parsed = parse_domain_name(domain)
if domain != parsed:
domain = parsed
errors["domain"] = "We've corrected your input to just the domain name, submit again once you've checked it's correct."
elif "." not in domain:
errors["domain"] = "Please enter a fully-qualified domain name."
elif domain.endswith("." + crsid + ".user.srcf.net"):
pass
elif domain.endswith(".user.srcf.net") or domain.endswith(".soc.srcf.net"):
errors["domain"] = "SRCF domains can't be registered here."
elif sess.query(Domain).filter(Domain.domain == domain).count():
errors["domain"] = "This domain is already registered."
else:
errors["domain"] = "Please enter a domain or subdomain."
if request.form.get("edit") or errors:
return render_template("member/add_vhost.html", member=mem, domain=domain, root=root, errors=errors)
elif not request.form.get("confirm"):
valid = {}
prefixed = "www.{}".format(domain)
for d in (domain, prefixed):
valid[d] = domains.verify(d)
good = all(v == (True, True) for v in valid.values())
return render_template("member/add_vhost_test.html", member=mem, domain=domain, root=root, valid=valid, good=good)
else:
return create_job_maybe_email_and_redirect(
jobs.AddUserVhost, member=mem,
domain=domain, root=root)
else:
return render_template("member/add_vhost.html", member=mem, domain=domain, root=root, errors=errors)
@bp.route("/member/domains/<domain>/changedocroot", methods=["GET", "POST"])
def change_vhost_docroot(domain):
crsid, mem = find_member()
errors = {}
try:
record = sess.query(Domain).filter(Domain.domain == domain, Domain.owner == crsid)[0]
except IndexError:
raise NotFound
root = record.root.replace("public_html/", "") if record.root else ""
if request.method == "POST":
root = request.form.get("root", "").strip()
if any([ch in root for ch in string.whitespace + "\\" + "\"" + "\'"]) or ".." in root:
errors["root"] = "This document root is invalid."
try:
domain = parse_domain_name(domain)
except ValueError as e:
errors["domain"] = e.args[0]
if request.method == "POST" and not errors:
return create_job_maybe_email_and_redirect(
jobs.ChangeUserVhostDocroot, member=mem,
domain=domain, root=root)
else:
return render_template("member/change_vhost_docroot.html", member=mem, domain=domain, root=root, errors=errors)
@bp.route("/member/domains/<domain>/remove", methods=["GET", "POST"])
def remove_vhost(domain):
crsid, mem = find_member()
try:
record = sess.query(Domain).filter(Domain.domain == domain)[0]
except IndexError:
raise NotFound
if not record.owner == crsid:
raise Forbidden
if request.method == "POST":
return create_job_maybe_email_and_redirect(
jobs.RemoveUserVhost, member=mem,
domain=domain)
else:
return render_template("member/remove_vhost.html", member=mem, domain=domain)
|
[
"flask.Blueprint",
"flask.request.form.get",
"flask.url_for",
"flask.render_template",
"srcf.domains.verify",
"re.search"
] |
[((449, 478), 'flask.Blueprint', 'Blueprint', (['"""member"""', '__name__'], {}), "('member', __name__)\n", (458, 478), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((689, 736), 'flask.render_template', 'render_template', (['"""member/home.html"""'], {'member': 'mem'}), "('member/home.html', member=mem)\n", (704, 736), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((1263, 1342), 'flask.render_template', 'render_template', (['"""member/reactivate.html"""'], {'member': 'mem', 'email': 'email', 'error': 'error'}), "('member/reactivate.html', member=mem, email=email, error=error)\n", (1278, 1342), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((1942, 2035), 'flask.render_template', 'render_template', (['"""member/update_email_address.html"""'], {'member': 'mem', 'email': 'email', 'error': 'error'}), "('member/update_email_address.html', member=mem, email=email,\n error=error)\n", (1957, 2035), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((2859, 2953), 'flask.render_template', 'render_template', (['"""member/update_email_handler.html"""'], {'member': 'mem', 'mail_handler': 'mail_handler'}), "('member/update_email_handler.html', member=mem,\n mail_handler=mail_handler)\n", (2874, 2953), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((3831, 3930), 'flask.render_template', 'render_template', (['"""member/create_mailing_list.html"""'], {'member': 'mem', 'listname': 'listname', 'error': 'error'}), "('member/create_mailing_list.html', member=mem, listname=\n listname, error=error)\n", (3846, 3930), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((4386, 4479), 'flask.render_template', 'render_template', (['"""member/reset_mailing_list_password.html"""'], {'member': 'mem', 'listname': 'listname'}), "('member/reset_mailing_list_password.html', member=mem,\n listname=listname)\n", (4401, 4479), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((5625, 5736), 'flask.render_template', 'render_template', (['"""member/reset_password.html"""'], {'member': 'mem', 'type': 'type', 'name': 'formatted_name', 'affects': 'affects'}), "('member/reset_password.html', member=mem, type=type, name=\n formatted_name, affects=affects)\n", (5640, 5736), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((6335, 6438), 'flask.render_template', 'render_template', (['"""member/create_database_account.html"""'], {'member': 'mem', 'type': 'type', 'name': 'formatted_name'}), "('member/create_database_account.html', member=mem, type=\n type, name=formatted_name)\n", (6350, 6438), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((7195, 7305), 'flask.render_template', 'render_template', (['"""member/create_database.html"""'], {'member': 'mem', 'type': 'type', 'name': 'formatted_name', 'user': 'has_user'}), "('member/create_database.html', member=mem, type=type, name=\n formatted_name, user=has_user)\n", (7210, 7305), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((9222, 9320), 'flask.render_template', 'render_template', (['"""member/add_vhost.html"""'], {'member': 'mem', 'domain': 'domain', 'root': 'root', 'errors': 'errors'}), "('member/add_vhost.html', member=mem, domain=domain, root=\n root, errors=errors)\n", (9237, 9320), False, 'from flask 
import Blueprint, render_template, request, redirect, url_for\n'), ((10310, 10419), 'flask.render_template', 'render_template', (['"""member/change_vhost_docroot.html"""'], {'member': 'mem', 'domain': 'domain', 'root': 'root', 'errors': 'errors'}), "('member/change_vhost_docroot.html', member=mem, domain=\n domain, root=root, errors=errors)\n", (10325, 10419), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((10928, 10998), 'flask.render_template', 'render_template', (['"""member/remove_vhost.html"""'], {'member': 'mem', 'domain': 'domain'}), "('member/remove_vhost.html', member=mem, domain=domain)\n", (10943, 10998), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((609, 637), 'flask.url_for', 'url_for', (['"""member.reactivate"""'], {}), "('member.reactivate')\n", (616, 637), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((2467, 2498), 'flask.request.form.get', 'request.form.get', (['"""confirm"""', '""""""'], {}), "('confirm', '')\n", (2483, 2498), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((2519, 2656), 'flask.render_template', 'render_template', (['"""member/update_email_handler_confirm.html"""'], {'member': 'mem', 'old_mail_handler': 'mem.mail_handler', 'mail_handler': 'mail_handler'}), "('member/update_email_handler_confirm.html', member=mem,\n old_mail_handler=mem.mail_handler, mail_handler=mail_handler)\n", (2534, 2656), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((3282, 3316), 're.search', 're.search', (['"""[^a-z0-9_-]"""', 'listname'], {}), "('[^a-z0-9_-]', listname)\n", (3291, 3316), False, 'import re\n'), ((8477, 8501), 'flask.request.form.get', 'request.form.get', (['"""edit"""'], {}), "('edit')\n", (8493, 8501), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((8532, 8630), 'flask.render_template', 'render_template', (['"""member/add_vhost.html"""'], {'member': 'mem', 'domain': 'domain', 'root': 'root', 'errors': 'errors'}), "('member/add_vhost.html', member=mem, domain=domain, root=\n root, errors=errors)\n", (8547, 8630), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((980, 1009), 'flask.request.form.get', 'request.form.get', (['"""email"""', '""""""'], {}), "('email', '')\n", (996, 1009), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((1544, 1573), 'flask.request.form.get', 'request.form.get', (['"""email"""', '""""""'], {}), "('email', '')\n", (1560, 1573), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((2242, 2278), 'flask.request.form.get', 'request.form.get', (['"""mail_handler"""', '""""""'], {}), "('mail_handler', '')\n", (2258, 2278), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((2394, 2416), 'flask.url_for', 'url_for', (['"""member.home"""'], {}), "('member.home')\n", (2401, 2416), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((3155, 3187), 'flask.request.form.get', 'request.form.get', (['"""listname"""', '""""""'], {}), "('listname', '')\n", (3171, 3187), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((7507, 7537), 'flask.request.form.get', 'request.form.get', (['"""domain"""', '""""""'], {}), "('domain', '')\n", (7523, 7537), False, 'from flask import Blueprint, render_template, 
request, redirect, url_for\n'), ((7561, 7589), 'flask.request.form.get', 'request.form.get', (['"""root"""', '""""""'], {}), "('root', '')\n", (7577, 7589), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((8643, 8670), 'flask.request.form.get', 'request.form.get', (['"""confirm"""'], {}), "('confirm')\n", (8659, 8670), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((8913, 9024), 'flask.render_template', 'render_template', (['"""member/add_vhost_test.html"""'], {'member': 'mem', 'domain': 'domain', 'root': 'root', 'valid': 'valid', 'good': 'good'}), "('member/add_vhost_test.html', member=mem, domain=domain,\n root=root, valid=valid, good=good)\n", (8928, 9024), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((9750, 9778), 'flask.request.form.get', 'request.form.get', (['"""root"""', '""""""'], {}), "('root', '')\n", (9766, 9778), False, 'from flask import Blueprint, render_template, request, redirect, url_for\n'), ((8810, 8827), 'srcf.domains.verify', 'domains.verify', (['d'], {}), '(d)\n', (8824, 8827), False, 'from srcf import domains\n')]
|
import logging
from .vid_eval import do_vid_evaluation
def vid_evaluation(dataset, predictions, output_folder, box_only, **_):
logger = logging.getLogger("mega_core.inference")
logger.info("performing vid evaluation, ignored iou_types.")
return do_vid_evaluation(
dataset=dataset,
predictions=predictions,
output_folder=output_folder,
box_only=box_only,
logger=logger,
)
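# A hedged usage sketch: mega_core's evaluation dispatcher would typically call this
# wrapper for VID-style datasets roughly as below. `vid_dataset` and `predictions`
# are placeholders, not objects defined in this module, so the call is left commented.
#
# results = vid_evaluation(
#     dataset=vid_dataset,
#     predictions=predictions,
#     output_folder="./inference/VID_val",
#     box_only=False,
# )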
|
[
"logging.getLogger"
] |
[((143, 183), 'logging.getLogger', 'logging.getLogger', (['"""mega_core.inference"""'], {}), "('mega_core.inference')\n", (160, 183), False, 'import logging\n')]
|
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
nrpy_dir_path = os.path.join("../..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
# Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data.
import reference_metric as rfm # NRPy+: Reference metric support
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = __name__
import Min_Max_and_Piecewise_Expressions as noif
bound = sp.Rational(1,10)
def Ax_FW(x,y,z, **params):
return sp.sympify(0)
def Ay_FW(x,y,z, **params):
return sp.sympify(0)
def Az_FW(x,y,z, **params):
# A_z = y+ (-x-0.0075) if x <= -0.1
# (0.75x^2 - 0.85x) if -0.1 < x <= 0.1
# (-0.7x-0.0075) if x > 0.1
Azleft = y - x - sp.Rational(75,10000)
Azcenter = y + sp.Rational(75,100)*x*x - sp.Rational(85,100)*x
Azright = y - sp.Rational(7,10)*x - sp.Rational(75,10000)
out = noif.coord_leq_bound(x,-bound)*Azleft\
+noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Azcenter\
+noif.coord_greater_bound(x,bound)*Azright
return out
def ValenciavU_func_FW(**params):
# B^x(0,x) = 1.0
# B^y(0,x) = 1.0 if x <= -0.1
# 1.0-1.5(x+0.1) if -0.1 < x <= 0.1
# 0.7 if x > 0.1
# B^z(0,x) = 0
x = rfm.xx_to_Cart[0]
y = rfm.xx_to_Cart[1]
Byleft = sp.sympify(1)
Bycenter = sp.sympify(1) - sp.Rational(15,10)*(x+sp.Rational(1,10))
Byright = sp.Rational(7,10)
BU = ixp.zerorank1()
BU[0] = sp.sympify(1)
BU[1] = noif.coord_leq_bound(x,-bound)*Byleft\
+noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Bycenter\
+noif.coord_greater_bound(x,bound)*Byright
BU[2] = 0
# E^x(0,x) = 0.0 , E^y(x) = 0.0 , E^z(x) = -B^y(0,x)
EU = ixp.zerorank1()
EU[0] = sp.sympify(0)
EU[1] = sp.sympify(0)
EU[2] = -BU[1]
# In flat space, ED and EU are identical, so we can still use this function.
return gfcf.compute_ValenciavU_from_ED_and_BU(EU, BU)
|
[
"sys.path.append",
"NRPy_param_funcs.set_parval_from_str",
"sympy.Rational",
"indexedexp.zerorank1",
"sympy.sympify",
"Min_Max_and_Piecewise_Expressions.coord_greater_bound",
"GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions.compute_ValenciavU_from_ED_and_BU",
"os.path.join",
"Min_Max_and_Piecewise_Expressions.coord_leq_bound",
"reference_metric.reference_metric"
] |
[((154, 172), 'os.path.join', 'os.path.join', (['""".."""'], {}), "('..')\n", (166, 172), False, 'import os, sys\n'), ((258, 279), 'os.path.join', 'os.path.join', (['"""../.."""'], {}), "('../..')\n", (270, 279), False, 'import os, sys\n'), ((892, 961), 'NRPy_param_funcs.set_parval_from_str', 'par.set_parval_from_str', (['"""reference_metric::CoordSystem"""', '"""Cartesian"""'], {}), "('reference_metric::CoordSystem', 'Cartesian')\n", (915, 961), True, 'import NRPy_param_funcs as par\n'), ((961, 983), 'reference_metric.reference_metric', 'rfm.reference_metric', ([], {}), '()\n', (981, 983), True, 'import reference_metric as rfm\n'), ((1106, 1124), 'sympy.Rational', 'sp.Rational', (['(1)', '(10)'], {}), '(1, 10)\n', (1117, 1124), True, 'import sympy as sp\n'), ((211, 241), 'sys.path.append', 'sys.path.append', (['nrpy_dir_path'], {}), '(nrpy_dir_path)\n', (226, 241), False, 'import os, sys\n'), ((318, 348), 'sys.path.append', 'sys.path.append', (['nrpy_dir_path'], {}), '(nrpy_dir_path)\n', (333, 348), False, 'import os, sys\n'), ((1164, 1177), 'sympy.sympify', 'sp.sympify', (['(0)'], {}), '(0)\n', (1174, 1177), True, 'import sympy as sp\n'), ((1218, 1231), 'sympy.sympify', 'sp.sympify', (['(0)'], {}), '(0)\n', (1228, 1231), True, 'import sympy as sp\n'), ((2026, 2039), 'sympy.sympify', 'sp.sympify', (['(1)'], {}), '(1)\n', (2036, 2039), True, 'import sympy as sp\n'), ((2126, 2144), 'sympy.Rational', 'sp.Rational', (['(7)', '(10)'], {}), '(7, 10)\n', (2137, 2144), True, 'import sympy as sp\n'), ((2154, 2169), 'indexedexp.zerorank1', 'ixp.zerorank1', ([], {}), '()\n', (2167, 2169), True, 'import indexedexp as ixp\n'), ((2182, 2195), 'sympy.sympify', 'sp.sympify', (['(1)'], {}), '(1)\n', (2192, 2195), True, 'import sympy as sp\n'), ((2471, 2486), 'indexedexp.zerorank1', 'ixp.zerorank1', ([], {}), '()\n', (2484, 2486), True, 'import indexedexp as ixp\n'), ((2499, 2512), 'sympy.sympify', 'sp.sympify', (['(0)'], {}), '(0)\n', (2509, 2512), True, 'import sympy as sp\n'), ((2525, 2538), 'sympy.sympify', 'sp.sympify', (['(0)'], {}), '(0)\n', (2535, 2538), True, 'import sympy as sp\n'), ((2651, 2697), 'GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions.compute_ValenciavU_from_ED_and_BU', 'gfcf.compute_ValenciavU_from_ED_and_BU', (['EU', 'BU'], {}), '(EU, BU)\n', (2689, 2697), True, 'import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf\n'), ((1415, 1437), 'sympy.Rational', 'sp.Rational', (['(75)', '(10000)'], {}), '(75, 10000)\n', (1426, 1437), True, 'import sympy as sp\n'), ((1544, 1566), 'sympy.Rational', 'sp.Rational', (['(75)', '(10000)'], {}), '(75, 10000)\n', (1555, 1566), True, 'import sympy as sp\n'), ((2055, 2068), 'sympy.sympify', 'sp.sympify', (['(1)'], {}), '(1)\n', (2065, 2068), True, 'import sympy as sp\n'), ((1482, 1502), 'sympy.Rational', 'sp.Rational', (['(85)', '(100)'], {}), '(85, 100)\n', (1493, 1502), True, 'import sympy as sp\n'), ((1711, 1745), 'Min_Max_and_Piecewise_Expressions.coord_greater_bound', 'noif.coord_greater_bound', (['x', 'bound'], {}), '(x, bound)\n', (1735, 1745), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((2071, 2090), 'sympy.Rational', 'sp.Rational', (['(15)', '(10)'], {}), '(15, 10)\n', (2082, 2090), True, 'import sympy as sp\n'), ((2348, 2382), 'Min_Max_and_Piecewise_Expressions.coord_greater_bound', 'noif.coord_greater_bound', (['x', 'bound'], {}), '(x, bound)\n', (2372, 2382), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((1522, 1540), 'sympy.Rational', 'sp.Rational', (['(7)', '(10)'], {}), '(7, 10)\n', (1533, 
1540), True, 'import sympy as sp\n'), ((1577, 1608), 'Min_Max_and_Piecewise_Expressions.coord_leq_bound', 'noif.coord_leq_bound', (['x', '(-bound)'], {}), '(x, -bound)\n', (1597, 1608), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((2093, 2111), 'sympy.Rational', 'sp.Rational', (['(1)', '(10)'], {}), '(1, 10)\n', (2104, 2111), True, 'import sympy as sp\n'), ((2208, 2239), 'Min_Max_and_Piecewise_Expressions.coord_leq_bound', 'noif.coord_leq_bound', (['x', '(-bound)'], {}), '(x, -bound)\n', (2228, 2239), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((1456, 1476), 'sympy.Rational', 'sp.Rational', (['(75)', '(100)'], {}), '(75, 100)\n', (1467, 1476), True, 'import sympy as sp\n'), ((1626, 1661), 'Min_Max_and_Piecewise_Expressions.coord_greater_bound', 'noif.coord_greater_bound', (['x', '(-bound)'], {}), '(x, -bound)\n', (1650, 1661), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((1661, 1691), 'Min_Max_and_Piecewise_Expressions.coord_leq_bound', 'noif.coord_leq_bound', (['x', 'bound'], {}), '(x, bound)\n', (1681, 1691), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((2260, 2295), 'Min_Max_and_Piecewise_Expressions.coord_greater_bound', 'noif.coord_greater_bound', (['x', '(-bound)'], {}), '(x, -bound)\n', (2284, 2295), True, 'import Min_Max_and_Piecewise_Expressions as noif\n'), ((2295, 2325), 'Min_Max_and_Piecewise_Expressions.coord_leq_bound', 'noif.coord_leq_bound', (['x', 'bound'], {}), '(x, bound)\n', (2315, 2325), True, 'import Min_Max_and_Piecewise_Expressions as noif\n')]
|
# Generated by Django 2.2 on 2019-04-15 23:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0006_auto_20190415_1639'),
]
operations = [
migrations.AlterModelTable(
name='post',
table='posts',
),
]
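# A hedged illustration (not part of the migration itself): AlterModelTable only renames
# the model's underlying table, which is equivalent to declaring Meta.db_table on the
# model. The Post body below is a hypothetical sketch, not the project's actual model.
#
# from django.db import models
#
# class Post(models.Model):
#     class Meta:
#         db_table = 'posts'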
|
[
"django.db.migrations.AlterModelTable"
] |
[((222, 276), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', ([], {'name': '"""post"""', 'table': '"""posts"""'}), "(name='post', table='posts')\n", (248, 276), False, 'from django.db import migrations\n')]
|
import re
def audit_link(linkText, uri):
"""Generate link "markdown" from URI."""
return '{{{}|{}}}'.format(linkText, uri)
def path_to_text(path):
"""Convert object path to the text portion."""
accession = re.match(r'\/.*\/(.*)\/', path)
return accession.group(1) if accession else None
def space_in_words(objects_string):
"""Insert a space between objects that have more than one
capital letter eg. AntibodyChar --> Antibody Char"""
add_space = re.sub(r"(\w)([A-Z])", r"\1 \2", objects_string)
return add_space
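# Hedged usage examples for the helpers above; the accession and URI values are
# made-up illustrations, not values taken from any real object.
assert audit_link('ENCSR000AAA', '/experiments/ENCSR000AAA/') == '{ENCSR000AAA|/experiments/ENCSR000AAA/}'
assert path_to_text('/experiments/ENCSR000AAA/') == 'ENCSR000AAA'
assert space_in_words('AntibodyCharacterization') == 'Antibody Characterization'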
|
[
"re.sub",
"re.match"
] |
[((224, 257), 're.match', 're.match', (['"""\\\\/.*\\\\/(.*)\\\\/"""', 'path'], {}), "('\\\\/.*\\\\/(.*)\\\\/', path)\n", (232, 257), False, 'import re\n'), ((481, 530), 're.sub', 're.sub', (['"""(\\\\w)([A-Z])"""', '"""\\\\1 \\\\2"""', 'objects_string'], {}), "('(\\\\w)([A-Z])', '\\\\1 \\\\2', objects_string)\n", (487, 530), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/9/10-07:22
# @Author : TuringEmmy
# @Email : <EMAIL>
# @WeChat : csy_lgy
# @File : sklearn_nb.py
# @Project : Sep-Dragon
# *************************************************
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
gnb = GaussianNB()
y_pred = gnb.fit(X_train, y_train).predict(X_test)
print("Number of mislabeled points out of a total %d points : %d"
% (X_test.shape[0], (y_test != y_pred).sum()))
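# Equivalent check using scikit-learn's public metrics API (a small addition that
# reuses y_test / y_pred from above).
from sklearn.metrics import accuracy_score
print("Accuracy: %.3f" % accuracy_score(y_test, y_pred))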
|
[
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.naive_bayes.GaussianNB"
] |
[((371, 397), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (380, 397), False, 'from sklearn.datasets import load_iris\n'), ((433, 486), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.5)', 'random_state': '(0)'}), '(X, y, test_size=0.5, random_state=0)\n', (449, 486), False, 'from sklearn.model_selection import train_test_split\n'), ((493, 505), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (503, 505), False, 'from sklearn.naive_bayes import GaussianNB\n')]
|
import sqlite3
conn = sqlite3.connect('customer.db')
c = conn.cursor()
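# The INSERT below assumes a `customers` table already exists. Creating it here keeps the
# script self-contained; the column names are assumptions inferred from the inserted rows.
c.execute("CREATE TABLE IF NOT EXISTS customers (first_name TEXT, last_name TEXT, email TEXT)")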
customer_list = [
('Subhadeep', 'Banerjee', '<EMAIL>'),
('Richard', 'Chakraborty', '<EMAIL>'),
('Soumya', 'Mitra', '<EMAIL>'),
]
c.executemany("INSERT INTO customers VALUES (?, ?, ?)", customer_list)
conn.commit()
conn.close()
|
[
"sqlite3.connect"
] |
[((23, 53), 'sqlite3.connect', 'sqlite3.connect', (['"""customer.db"""'], {}), "('customer.db')\n", (38, 53), False, 'import sqlite3\n')]
|
from os.path import dirname, join
from setuptools import setup, find_packages
with open(join(dirname(__file__), 'reminder/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
setup(
name='reminder',
version=version,
description='A Service to keep track of vehicle service date',
long_description=open('README.md').read(),
author='<NAME>',
author_email="<EMAIL>",
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='Apache 2.0',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['reminder=reminder.launch.launch:execute']
},
classifiers=[
'Framework :: Reminder',
'Development Status :: 1 - Development',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Ubuntu',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
],
)
|
[
"os.path.dirname",
"setuptools.find_packages"
] |
[((503, 546), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'tests.*')"}), "(exclude=('tests', 'tests.*'))\n", (516, 546), False, 'from setuptools import setup, find_packages\n'), ((94, 111), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'from os.path import dirname, join\n')]
|
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Unit Tests for the Registrars Order filing."""
import copy
import random
from legal_api.models import Filing
from registry_schemas.example_data import REGISTRARS_ORDER_FILING_TEMPLATE
from entity_filer.worker import process_filing
from tests.unit import create_business, create_filing
async def test_worker_registrars_order(app, session):
"""Assert that the registrars order object is correctly populated to model objects."""
identifier = 'BC1234567'
business = create_business(identifier, legal_type='BC')
filing = copy.deepcopy(REGISTRARS_ORDER_FILING_TEMPLATE)
filing['filing']['business']['identifier'] = identifier
payment_id = str(random.SystemRandom().getrandbits(0x58))
filing_id = (create_filing(payment_id, filing, business_id=business.id)).id
filing_msg = {'filing': {'id': filing_id}}
# Test
await process_filing(filing_msg, app)
# Check outcome
final_filing = Filing.find_by_id(filing_id)
assert filing['filing']['registrarsOrder']['fileNumber'] == final_filing.court_order_file_number
assert filing['filing']['registrarsOrder']['effectOfOrder'] == final_filing.court_order_effect_of_order
assert filing['filing']['registrarsOrder']['orderDetails'] == final_filing.comments.first().comment
|
[
"copy.deepcopy",
"tests.unit.create_business",
"random.SystemRandom",
"entity_filer.worker.process_filing",
"legal_api.models.Filing.find_by_id",
"tests.unit.create_filing"
] |
[((1079, 1123), 'tests.unit.create_business', 'create_business', (['identifier'], {'legal_type': '"""BC"""'}), "(identifier, legal_type='BC')\n", (1094, 1123), False, 'from tests.unit import create_business, create_filing\n'), ((1138, 1185), 'copy.deepcopy', 'copy.deepcopy', (['REGISTRARS_ORDER_FILING_TEMPLATE'], {}), '(REGISTRARS_ORDER_FILING_TEMPLATE)\n', (1151, 1185), False, 'import copy\n'), ((1531, 1559), 'legal_api.models.Filing.find_by_id', 'Filing.find_by_id', (['filing_id'], {}), '(filing_id)\n', (1548, 1559), False, 'from legal_api.models import Filing\n'), ((1326, 1384), 'tests.unit.create_filing', 'create_filing', (['payment_id', 'filing'], {'business_id': 'business.id'}), '(payment_id, filing, business_id=business.id)\n', (1339, 1384), False, 'from tests.unit import create_business, create_filing\n'), ((1459, 1490), 'entity_filer.worker.process_filing', 'process_filing', (['filing_msg', 'app'], {}), '(filing_msg, app)\n', (1473, 1490), False, 'from entity_filer.worker import process_filing\n'), ((1268, 1289), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (1287, 1289), False, 'import random\n')]
|
import pytest
from django.utils import timezone
from dynamic_models import cache
TEST_MODEL_NAME = "test"
now = timezone.now()
@pytest.fixture
def mock_now(monkeypatch):
monkeypatch.setattr(timezone, "now", lambda: now)
def test_get_and_update_last_modified(mock_now):
assert cache.get_last_modified(TEST_MODEL_NAME) is None
cache.update_last_modified(TEST_MODEL_NAME)
assert cache.get_last_modified(TEST_MODEL_NAME) == now
def test_delete_last_modified(mock_now):
cache.update_last_modified(TEST_MODEL_NAME)
assert cache.get_last_modified(TEST_MODEL_NAME) == now
cache.clear_last_modified(TEST_MODEL_NAME)
assert cache.get_last_modified(TEST_MODEL_NAME) is None
|
[
"django.utils.timezone.now",
"dynamic_models.cache.update_last_modified",
"dynamic_models.cache.get_last_modified",
"dynamic_models.cache.clear_last_modified"
] |
[((115, 129), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (127, 129), False, 'from django.utils import timezone\n'), ((344, 387), 'dynamic_models.cache.update_last_modified', 'cache.update_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (370, 387), False, 'from dynamic_models import cache\n'), ((494, 537), 'dynamic_models.cache.update_last_modified', 'cache.update_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (520, 537), False, 'from dynamic_models import cache\n'), ((601, 643), 'dynamic_models.cache.clear_last_modified', 'cache.clear_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (626, 643), False, 'from dynamic_models import cache\n'), ((291, 331), 'dynamic_models.cache.get_last_modified', 'cache.get_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (314, 331), False, 'from dynamic_models import cache\n'), ((399, 439), 'dynamic_models.cache.get_last_modified', 'cache.get_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (422, 439), False, 'from dynamic_models import cache\n'), ((549, 589), 'dynamic_models.cache.get_last_modified', 'cache.get_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (572, 589), False, 'from dynamic_models import cache\n'), ((655, 695), 'dynamic_models.cache.get_last_modified', 'cache.get_last_modified', (['TEST_MODEL_NAME'], {}), '(TEST_MODEL_NAME)\n', (678, 695), False, 'from dynamic_models import cache\n')]
|
from enum import Enum
import aiogram.utils.markdown as md
from aiogram.dispatcher.filters.state import StatesGroup, State
class PurchaseStates(StatesGroup):
""" States purchase flow """
category = State()
amount = State()
description = State()
class PurchaseCategory(Enum):
""" Categories for purchase """
MEAL = 'Еда'
REST = 'Отдых'
CAR = 'Машина'
EDUCATION = 'Образование'
RESET = 'Сброс'
# Description for /help command
HELP_DESCRIPTION = md.text(
md.bold('🔥C помощью бота вы сможете:', ),
md.text('✅Добавить покупку'),
md.text('✅Узнать остаток'),
md.text('✅Посмотреь свои покупки'),
md.text(''),
md.bold('Основные команды:'),
md.text('/add \- Добавление покупки'), # noqa
md.text('/balance \- Остаток на сегодня'), # noqa
md.text('/purchases \- Показать покупки'), # noqa
md.text('/menu \- Показать меню'), # noqa
md.text('/reset \- Сброс'), # noqa
md.text('/help \- Помощь'), # noqa
sep='\n',
)
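# A minimal, hedged sketch of how HELP_DESCRIPTION might be sent from a /help handler.
# `dp` (the bot's aiogram Dispatcher) is assumed to exist in the real bot module and is
# not defined in this file, so the handler is left commented out.
#
# from aiogram import types
#
# @dp.message_handler(commands=['help'])
# async def cmd_help(message: types.Message):
#     await message.answer(HELP_DESCRIPTION, parse_mode=types.ParseMode.MARKDOWN_V2)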
|
[
"aiogram.utils.markdown.bold",
"aiogram.dispatcher.filters.state.State",
"aiogram.utils.markdown.text"
] |
[((209, 216), 'aiogram.dispatcher.filters.state.State', 'State', ([], {}), '()\n', (214, 216), False, 'from aiogram.dispatcher.filters.state import StatesGroup, State\n'), ((230, 237), 'aiogram.dispatcher.filters.state.State', 'State', ([], {}), '()\n', (235, 237), False, 'from aiogram.dispatcher.filters.state import StatesGroup, State\n'), ((256, 263), 'aiogram.dispatcher.filters.state.State', 'State', ([], {}), '()\n', (261, 263), False, 'from aiogram.dispatcher.filters.state import StatesGroup, State\n'), ((504, 542), 'aiogram.utils.markdown.bold', 'md.bold', (['"""🔥C помощью бота вы сможете:"""'], {}), "('🔥C помощью бота вы сможете:')\n", (511, 542), True, 'import aiogram.utils.markdown as md\n'), ((550, 578), 'aiogram.utils.markdown.text', 'md.text', (['"""✅Добавить покупку"""'], {}), "('✅Добавить покупку')\n", (557, 578), True, 'import aiogram.utils.markdown as md\n'), ((584, 610), 'aiogram.utils.markdown.text', 'md.text', (['"""✅Узнать остаток"""'], {}), "('✅Узнать остаток')\n", (591, 610), True, 'import aiogram.utils.markdown as md\n'), ((616, 650), 'aiogram.utils.markdown.text', 'md.text', (['"""✅Посмотреь свои покупки"""'], {}), "('✅Посмотреь свои покупки')\n", (623, 650), True, 'import aiogram.utils.markdown as md\n'), ((656, 667), 'aiogram.utils.markdown.text', 'md.text', (['""""""'], {}), "('')\n", (663, 667), True, 'import aiogram.utils.markdown as md\n'), ((673, 701), 'aiogram.utils.markdown.bold', 'md.bold', (['"""Основные команды:"""'], {}), "('Основные команды:')\n", (680, 701), True, 'import aiogram.utils.markdown as md\n'), ((707, 745), 'aiogram.utils.markdown.text', 'md.text', (['"""/add \\\\- Добавление покупки"""'], {}), "('/add \\\\- Добавление покупки')\n", (714, 745), True, 'import aiogram.utils.markdown as md\n'), ((758, 800), 'aiogram.utils.markdown.text', 'md.text', (['"""/balance \\\\- Остаток на сегодня"""'], {}), "('/balance \\\\- Остаток на сегодня')\n", (765, 800), True, 'import aiogram.utils.markdown as md\n'), ((813, 855), 'aiogram.utils.markdown.text', 'md.text', (['"""/purchases \\\\- Показать покупки"""'], {}), "('/purchases \\\\- Показать покупки')\n", (820, 855), True, 'import aiogram.utils.markdown as md\n'), ((868, 902), 'aiogram.utils.markdown.text', 'md.text', (['"""/menu \\\\- Показать меню"""'], {}), "('/menu \\\\- Показать меню')\n", (875, 902), True, 'import aiogram.utils.markdown as md\n'), ((915, 942), 'aiogram.utils.markdown.text', 'md.text', (['"""/reset \\\\- Сброс"""'], {}), "('/reset \\\\- Сброс')\n", (922, 942), True, 'import aiogram.utils.markdown as md\n'), ((955, 982), 'aiogram.utils.markdown.text', 'md.text', (['"""/help \\\\- Помощь"""'], {}), "('/help \\\\- Помощь')\n", (962, 982), True, 'import aiogram.utils.markdown as md\n')]
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import json
import os
import unittest
import tempfile
import time
from django.contrib.auth.models import User
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group
from azure.abfs.abfs import ABFS
from azure.active_directory import ActiveDirectory
from azure.conf import ABFS_CLUSTERS,AZURE_ACCOUNTS, is_abfs_enabled
from azure.abfs.upload import DEFAULT_WRITE_SIZE
LOG = logging.getLogger(__name__)
"""
Interfaces for ADLS via HttpFs/WebHDFS
"""
class ABFSTestBase(unittest.TestCase):
integration = True
def setUp(self):
if not is_abfs_enabled():
raise SkipTest
self.client = ABFS.from_config(ABFS_CLUSTERS['default'], ActiveDirectory.from_config(AZURE_ACCOUNTS['default'], version='v2.0'))
self.c = make_logged_in_client(username='test', is_superuser=False)
grant_access('test', 'test', 'filebrowser')
add_to_group('test')
self.user = User.objects.get(username="test")
self.test_fs = 'abfs://testfs' + (str(int(time.time()) ))
LOG.debug("%s" %self.test_fs)
self.client.mkdir(self.test_fs)
def tearDown(self):
self.client.rmtree(self.test_fs)
def test_list(self):
filesystems = self.client.listdir('abfs://')
LOG.debug("%s" %filesystems)
assert_true(filesystems is not None, filesystems)
pathing = self.client.listdir('abfs://' + filesystems[0], {"recursive" : "true"} )
LOG.debug("%s" %pathing)
assert_true(pathing is not None, pathing)
directory = self.client.listdir('abfs://' + filesystems[0] + '/' + pathing[0])
LOG.debug("%s" %directory)
assert_true(directory is not None, directory)
directory = self.client.listdir(self.test_fs)
LOG.debug("%s" %directory)
assert_true(directory is not None, directory)
pathing = self.client._statsf(filesystems[276])
LOG.debug("%s" %pathing)
assert_true(pathing is not None, pathing)
pathing = self.client._statsf(filesystems[277])
LOG.debug("%s" %pathing)
assert_true(pathing is not None, pathing)
def test_existence(self):
test_fs = self.test_fs
test_dir = test_fs + '/test_existence'
test_file = test_dir + '/test.txt'
self.client.mkdir(test_dir)
self.client.create(test_file)
#Testing root and filesystems
assert_true(self.client.exists('abfs://'))
assert_true(self.client.exists(test_fs))
#testing created directories and files
assert_true(self.client.exists(test_dir))
assert_true(self.client.exists(test_file))
assert_false(self.client.exists(test_dir + 'a'))
def test_stat_output(self):
"""
Only tests if the stat outputs something
"""
test_fs = self.test_fs
test_dir = test_fs + '/test_stats'
test_dir2 = test_dir + '/test2'
test_dir3 = test_dir2 + '/test3'
self.client.mkdir(test_dir)
self.client.mkdir(test_dir2)
self.client.mkdir(test_dir3)
#testing filesystems
result = self.client.stats(test_fs)
LOG.debug("%s" %result)
assert_true(result is not None, result)
result = self.client.listdir_stats(test_fs)
LOG.debug("%s" %result)
#testing directories
result = self.client.stats(test_dir)
LOG.debug("%s" %result)
result = self.client.listdir_stats(test_dir)
LOG.debug("%s" %result)
result = self.client.stats(test_dir2)
LOG.debug("%s" %result)
result = self.client.listdir_stats(test_dir2)
LOG.debug("%s" %result)
result = self.client.stats(test_dir3)
LOG.debug("%s" %result)
result = self.client.listdir_stats(test_dir3)
LOG.debug("%s" %result)
def test_mkdir(self):
test_dir = self.test_fs + '/test_mkdir'
assert_false(self.client.exists(test_dir))
self.client.mkdir(test_dir)
assert_true(self.client.exists(test_dir))
self.client.isdir(test_dir)
def test_append_and_flush(self):
test_fs = self.test_fs
test_file = test_fs + '/test.txt'
self.client.create(test_file)
test_string = "This is a test."
test_len = len(test_string)
resp = self.client.append(test_file, test_string) #only works with strings
LOG.debug("%s" %self.client.stats(test_file))
try:
LOG.debug("%s" %resp)
resp = self.client.read(test_file, length = test_len)
except:
LOG.debug("Not written yet")
self.client.flush(test_file, {"position" : test_len} )
resp = self.client.read(test_file)
assert_true(resp == test_string)
self.client.remove(test_file)
def test_rename(self):
test_fs = self.test_fs
test_dir = test_fs + '/test'
test_dir2 = test_fs + '/test2'
test_file = test_fs + '/test.txt'
test_file2 = test_fs + '/test2.txt'
self.client.mkdir(test_dir)
assert_true(self.client.exists(test_dir))
assert_false(self.client.exists(test_dir2))
self.client.rename(test_dir, test_dir2)
assert_false(self.client.exists(test_dir))
assert_true(self.client.exists(test_dir2))
self.client.create(test_file)
assert_true(self.client.exists(test_file))
assert_false(self.client.exists(test_file2))
self.client.rename(test_file, test_file2)
assert_false(self.client.exists(test_file))
assert_true(self.client.exists(test_file2))
def test_chmod(self):
test_dir = self.test_fs + '/test_chmod'
self.client.mkdir(test_dir)
test_dir_permission = test_dir +'/test'
test_file_permission = test_dir +'/test.txt'
self.client.create(test_file_permission)
self.client.chmod(test_file_permission, '0777')
self.client.stats(test_file_permission)
self.client.mkdir(test_dir_permission)
self.client.chmod(test_dir_permission, '0777')
self.client.stats(test_dir_permission)
def test_chown(self):
test_dir = self.test_fs + '/test_chown'
self.client.mkdir(test_dir)
test_dir_permission = test_dir +'/test'
test_file_permission = test_dir +'/test.txt'
self.client.create(test_file_permission)
self.client.chown(test_file_permission, 'temp')
self.client.stats(test_file_permission)
self.client.mkdir(test_dir_permission)
self.client.chown(test_dir_permission, 'temp')
self.client.stats(test_dir_permission)
def test_create_with_file_permissions(self):
test_dir = self.test_fs + '/test_chown'
test_file = test_dir + '/test.txt'
self.client.mkdir(test_dir)
self.client.create(test_file, headers = {'x-ms-permissions' : '0777'})
def test_upload(self):
with tempfile.NamedTemporaryFile() as local_file:
# Make sure we can upload larger than the UPLOAD chunk size
file_size = DEFAULT_WRITE_SIZE * 2
local_file.write('0' * file_size)
local_file.flush()
self.client.mkdir(self.test_fs + '/test_upload')
dest_dir = self.test_fs + '/test_upload'
local_file = local_file.name
dest_path = '%s/%s' % (dest_dir, os.path.basename(local_file))
add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')
# Just upload the current python file
try:
resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=file(local_file)))
response = json.loads(resp.content)
finally:
remove_from_group(self.user.username, 'has_abfs')
assert_equal(0, response['status'], response)
stats = self.client.stats(dest_path)
actual = self.client.read(dest_path)
expected = file(local_file).read()
assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))
def test_copy_file(self):
test_fs = self.test_fs
testdir1 = test_fs + '/testcpy1'
testdir2 = test_fs + '/testcpy2'
test_file = testdir1 + '/test.txt'
self.client.mkdir(testdir1)
self.client.mkdir(testdir2)
self.client.create(test_file)
test_string = "This is a test."
test_len = len(test_string)
resp = self.client.append(test_file, test_string)
self.client.flush(test_file, {"position" : test_len} )
self.client.copy(test_file, testdir2)
self.client.stats(testdir2 + '/test.txt')
resp = self.client.read(testdir2 + '/test.txt')
resp2 = self.client.read(test_file)
assert_equal(resp, resp2, "Files %s and %s are not equal" %(test_file, testdir2 + '/test.txt'))
def test_copy_dir(self):
test_fs = self.test_fs
testdir1 = test_fs + '/testcpy1'
testdir2 = test_fs + '/testcpy2'
test_dir3 = testdir1 + '/test'
test_dir4 = test_dir3 + '/test2'
self.client.mkdir(testdir1)
self.client.mkdir(testdir2)
self.client.mkdir(test_dir3)
self.client.mkdir(test_dir4)
self.client.copy(test_dir3, testdir2)
self.client.stats(testdir2 + '/test')
self.client.stats(testdir2 + '/test/test2')
@staticmethod
def test_static_methods():
test_dir = 'abfss://testfs/test_static/'
LOG.debug("%s" %test_dir)
norm_path = ABFS.normpath(test_dir)
LOG.debug("%s" %norm_path)
parent = ABFS.parent_path(test_dir)
LOG.debug("%s" %parent)
join_path = ABFS.join(test_dir, 'test1')
LOG.debug("%s" %join_path)
|
[
"azure.abfs.abfs.ABFS.join",
"tempfile.NamedTemporaryFile",
"json.loads",
"django.contrib.auth.models.User.objects.get",
"os.path.basename",
"nose.tools.assert_true",
"desktop.lib.test_utils.add_permission",
"nose.tools.assert_equal",
"time.time",
"azure.active_directory.ActiveDirectory.from_config",
"desktop.lib.django_test_util.make_logged_in_client",
"desktop.lib.test_utils.remove_from_group",
"azure.abfs.abfs.ABFS.normpath",
"azure.conf.is_abfs_enabled",
"desktop.lib.test_utils.add_to_group",
"desktop.lib.test_utils.grant_access",
"logging.getLogger",
"azure.abfs.abfs.ABFS.parent_path"
] |
[((1431, 1458), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1448, 1458), False, 'import logging\n'), ((1784, 1842), 'desktop.lib.django_test_util.make_logged_in_client', 'make_logged_in_client', ([], {'username': '"""test"""', 'is_superuser': '(False)'}), "(username='test', is_superuser=False)\n", (1805, 1842), False, 'from desktop.lib.django_test_util import make_logged_in_client\n'), ((1847, 1890), 'desktop.lib.test_utils.grant_access', 'grant_access', (['"""test"""', '"""test"""', '"""filebrowser"""'], {}), "('test', 'test', 'filebrowser')\n", (1859, 1890), False, 'from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group\n'), ((1895, 1915), 'desktop.lib.test_utils.add_to_group', 'add_to_group', (['"""test"""'], {}), "('test')\n", (1907, 1915), False, 'from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group\n'), ((1932, 1965), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""test"""'}), "(username='test')\n", (1948, 1965), False, 'from django.contrib.auth.models import User\n'), ((2279, 2328), 'nose.tools.assert_true', 'assert_true', (['(filesystems is not None)', 'filesystems'], {}), '(filesystems is not None, filesystems)\n', (2290, 2328), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((2455, 2496), 'nose.tools.assert_true', 'assert_true', (['(pathing is not None)', 'pathing'], {}), '(pathing is not None, pathing)\n', (2466, 2496), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((2620, 2665), 'nose.tools.assert_true', 'assert_true', (['(directory is not None)', 'directory'], {}), '(directory is not None, directory)\n', (2631, 2665), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((2756, 2801), 'nose.tools.assert_true', 'assert_true', (['(directory is not None)', 'directory'], {}), '(directory is not None, directory)\n', (2767, 2801), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((2892, 2933), 'nose.tools.assert_true', 'assert_true', (['(pathing is not None)', 'pathing'], {}), '(pathing is not None, pathing)\n', (2903, 2933), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((3024, 3065), 'nose.tools.assert_true', 'assert_true', (['(pathing is not None)', 'pathing'], {}), '(pathing is not None, pathing)\n', (3035, 3065), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((4040, 4079), 'nose.tools.assert_true', 'assert_true', (['(result is not None)', 'result'], {}), '(result is not None, result)\n', (4051, 4079), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((5470, 5502), 'nose.tools.assert_true', 'assert_true', (['(resp == test_string)'], {}), '(resp == test_string)\n', (5481, 5502), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((9308, 9409), 'nose.tools.assert_equal', 'assert_equal', (['resp', 'resp2', "('Files %s and %s are not equal' % (test_file, testdir2 + '/test.txt'))"], {}), "(resp, resp2, 'Files %s and %s are not equal' % (test_file, \n testdir2 + '/test.txt'))\n", (9320, 9409), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((10025, 10048), 'azure.abfs.abfs.ABFS.normpath', 'ABFS.normpath', (['test_dir'], {}), '(test_dir)\n', (10038, 10048), False, 'from azure.abfs.abfs import ABFS\n'), ((10093, 10119), 'azure.abfs.abfs.ABFS.parent_path', 'ABFS.parent_path', 
(['test_dir'], {}), '(test_dir)\n', (10109, 10119), False, 'from azure.abfs.abfs import ABFS\n'), ((10164, 10192), 'azure.abfs.abfs.ABFS.join', 'ABFS.join', (['test_dir', '"""test1"""'], {}), "(test_dir, 'test1')\n", (10173, 10192), False, 'from azure.abfs.abfs import ABFS\n'), ((1598, 1615), 'azure.conf.is_abfs_enabled', 'is_abfs_enabled', ([], {}), '()\n', (1613, 1615), False, 'from azure.conf import ABFS_CLUSTERS, AZURE_ACCOUNTS, is_abfs_enabled\n'), ((1699, 1769), 'azure.active_directory.ActiveDirectory.from_config', 'ActiveDirectory.from_config', (["AZURE_ACCOUNTS['default']"], {'version': '"""v2.0"""'}), "(AZURE_ACCOUNTS['default'], version='v2.0')\n", (1726, 1769), False, 'from azure.active_directory import ActiveDirectory\n'), ((7547, 7576), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7574, 7576), False, 'import tempfile\n'), ((7983, 8080), 'desktop.lib.test_utils.add_permission', 'add_permission', (['self.user.username', '"""has_abfs"""'], {'permname': '"""abfs_access"""', 'appname': '"""filebrowser"""'}), "(self.user.username, 'has_abfs', permname='abfs_access',\n appname='filebrowser')\n", (7997, 8080), False, 'from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group\n'), ((8385, 8430), 'nose.tools.assert_equal', 'assert_equal', (['(0)', "response['status']", 'response'], {}), "(0, response['status'], response)\n", (8397, 8430), False, 'from nose.tools import assert_true, assert_false, assert_equal\n'), ((8274, 8298), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (8284, 8298), False, 'import json\n'), ((8322, 8371), 'desktop.lib.test_utils.remove_from_group', 'remove_from_group', (['self.user.username', '"""has_abfs"""'], {}), "(self.user.username, 'has_abfs')\n", (8339, 8371), False, 'from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group\n'), ((2019, 2030), 'time.time', 'time.time', ([], {}), '()\n', (2028, 2030), False, 'import time\n'), ((7940, 7968), 'os.path.basename', 'os.path.basename', (['local_file'], {}), '(local_file)\n', (7956, 7968), False, 'import os\n')]
|
import logging
from django.conf import settings
from celery.task import task
from elasticutils.utils import chunked
log = logging.getLogger('elasticutils')
@task
def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
"""Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
log.debug('Indexing objects {0}-{1}. [{2}]'.format(
ids[0], ids[-1], len(ids)))
# Get the model this mapping type is based on.
model = mapping_type.get_model()
# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
documents = []
for obj in model.objects.filter(id__in=id_list):
try:
documents.append(mapping_type.extract_document(obj.id, obj))
except Exception as exc:
log.exception('Unable to extract document {0}: {1}'.format(
obj, repr(exc)))
if documents:
mapping_type.bulk_index(documents, id_field='id', es=es, index=index)
@task
def unindex_objects(mapping_type, ids, es=None, index=None):
"""Remove documents of a specified mapping_type from the index.
This allows for asynchronous deleting.
If a mapping_type extends Indexable, you can add a ``pre_delete``
hook for the model that it's based on like this::
@receiver(dbsignals.pre_delete, sender=MyModel)
def remove_from_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.unindex_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to remove
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
for id_ in ids:
mapping_type.unindex(id_, es=es, index=index)
|
[
"elasticutils.utils.chunked",
"logging.getLogger"
] |
[((126, 159), 'logging.getLogger', 'logging.getLogger', (['"""elasticutils"""'], {}), "('elasticutils')\n", (143, 159), False, 'import logging\n'), ((1621, 1645), 'elasticutils.utils.chunked', 'chunked', (['ids', 'chunk_size'], {}), '(ids, chunk_size)\n', (1628, 1645), False, 'from elasticutils.utils import chunked\n')]
|
# Unit
import math
from . import lia
from . import const
V2 = lia.Vector2
V2z = lia.Vector2C.Zero
V3 = lia.Vector3
V3z = lia.Vector3C.Zero
M2 = lia.Matrix2
M3 = lia.Matrix3
# Angle in radians
class Rad:
@classmethod
def mapDegRad(cls, degree=0.0):
return degree * math.pi / 180
@classmethod
def mapRadDeg(cls, radian=0.0):
return radian / math.pi * 180
@classmethod
def XY(cls, x=1.0, y=1.0):
return Rad(math.atan2(y, x))
@classmethod
def V2(cls, v2=V2z):
return Rad(math.atan2(v2.y, v2.x))
def __init__(self, radian=0.0):
self.n = radian
def addDeg(self, d=0.0):
return Deg(self.deg() + d)
def __mul__(self, n=1.0):
return self.scalar(n)
def __div__(self, n=1.0):
return self.scalar(1/n)
def scalar(self, n=0.0):
return Rad(self.n * n)
def __neg__(self):
return self.scalar(-1)
def rev(self):
return self.scalar(-1)
def deg(self):
return self.mapRadDeg(self.n)
def clone(self):
return Rad(self.n)
def toString(self):
return str(self.deg())
def __repr__(self):
return '<{0}: {1}>'.format('Rad(Deg)', self.toString())
def cos(self):
return math.cos(self.n)
def sin(self):
return math.sin(self.n)
def tan(self):
return math.tan(self.n)
def xy(self):
return self.cos(), self.sin()
def v2(self):
return V2(self.cos(), self.sin())
def turn(self):
c = self.cos()
s = self.sin()
return M2((
(c, -s),
(s, c)))
def turnX(self):
c = self.cos()
s = self.sin()
return M3((
(1, 0, 0),
(0, c, -s),
(0, s, c)))
def turnY(self):
c = self.cos()
s = self.sin()
return M3((
(c, 0, s),
(0, 1, 0),
(-s, 0, c)))
def turnZ(self):
c = self.cos()
s = self.sin()
return M3((
(c, -s, 0),
(s, c, 0),
(0, 0, 1)))
# Angle in degrees
def Deg(deg=0.0):
return Rad(Rad.mapDegRad(deg))
class RadC:
Deg0 = Deg(0)
Deg5 = Deg(45)
Deg90 = Deg(90)
Deg180 = Deg(180)
Deg270 = Deg(270)
Deg360 = Deg(360)
EmptyList = [Rad(0) for i in range(0)]
class Rad3:
@classmethod
def V3(cls, d=lia.Vector3C.Zero):
x = Rad.V2(lia.Vector2(d.y, d.z))
y = Rad.V2(lia.Vector2(d.z, d.x))
z = Rad.V2(lia.Vector2(d.x, d.y))
return Rad3(x, y, z)
def __init__(self, x=RadC.Deg0, y=RadC.Deg0, z=RadC.Deg0):
self.x = x
self.y = y
self.z = z
class Dir:
def __init__(self, d=V3z):
self.v = d
def toTuple(self):
return self.v.toTuple()
def toString(self):
return self.v.toString()
def __repr__(self):
return '<{0}: {1}>'.format('Dir', self.toString())
def unit(self):
return self.v.normalize()
def length(self):
return self.v.length()
def radZ(self):
return Rad.XY(self.v.x, self.v.y)
def radX(self):
return Rad.XY(self.v.y, self.v.z)
def radY(self):
return Rad.XY(self.v.z, self.v.x)
def radV(self):
return Rad.XY(self.v.v2().length(), self.v.z)
def turnXtoDir(self):
turnV = self.radV().rev().turnY()
turnH = self.radZ().turnZ()
return turnH.mul(turnV)
def turnZtoDir(self):
turnV = self.radV().rev().addDeg(90).turnY()
turnH = self.radZ().turnZ()
return turnH.mul(turnV)
# Coordinate 'c' & Direction 'd'
# Coordinate c (3D vector) and direction d (3D vector)
# - represents the line cd: the line through point c in direction d
# - d is not necessarily a unit vector
class CD:
def __init__(self, c=V3z, d=V3z):
if isinstance(d, V3):
d = Dir(d)
assert isinstance(d, Dir)
self.c = c
self.d = d
def toTuple(self):
return (self.c, self.d)
def toString(self):
return '(%.1f, %.1f)' % self.toTuple()
def __repr__(self):
return '<{0}: {1}>'.format('CD', self.toString())
    # Project point v onto the line cd
    # - return: a CD holding the projected point and the projection direction
def projection(self, v=V3z):
d1 = v.sub(self.c)
d2u = self.d.unit()
d2 = d2u.scalar(d2u.ip(d1))
d3 = d2.sub(d1)
return CD(v.add(d3), d3)
def dirToLine(self, dist):
assert isinstance(dist, CD)
d1 = self.d.normalize()
d2 = dist.d.normalize()
dircp = d1.cp(d2)
dircplen = dircp.length()
if dircplen == 0:
return self.projection(dist.c).d
dir = dist.c.sub(self.c)
dircp2 = dircp.scalar(dircp.ip(dir) / dircplen)
return Dir(dircp2)
# Cylindrical Coordinate System
class CCS:
Dim = 3
@classmethod
def Tuple(cls, t):
return CCS(*t)
@classmethod
def V3(cls, v3):
assert isinstance(v3, V3)
r = v3.v2().length()
d = Rad.mapRadDeg(math.atan2(v3.y, v3.x))
z = v3.z
return CCS(r, d, z)
def __init__(self, r=1.0, d=0.0, z=0.0):
self.r = r
self.d = d
self.z = z
def tuple(self):
return (self.r, self.d, self.z)
def toString(self):
return '(%.1f, %.1f, %.1f)' % self.tuple()
def __repr__(self):
return '<{0}: {1}>'.format('CCS', self.toString())
def v3(self):
v2 = Deg(self.d).v2().scalar(self.r)
return lia.Vector3(v2.x, v2.y, self.z)
def rad(self):
return Deg(self.d)
def add3(self, r=0.0, d=0.0, z=0.0):
return self.add((r, d, z))
def __add__(self, ccs):
return self.add(ccs)
def add(self, ccs):
if isinstance(ccs, CCS):
ccs = ccs.tuple()
return self.Tuple(lia.add(self.tuple(), ccs, self.Dim))
def __sub__(self, ccs):
return self.sub(ccs)
def sub(self, ccs):
if isinstance(ccs, CCS):
ccs = ccs.tuple()
return self.Tuple(lia.sub(self.tuple(), ccs, self.Dim))
def scale3(self, r=1.0, d=1.0, z=1.0):
return CCS(self.r * r, self.d * d, self.z * z)
def __mul__(self, n):
return self.scalar(n)
def __div__(self, n):
return self.scalar(1/n)
def __neg__(self):
return self.scalar(-1)
def scalar(self, n=1.0):
return self.scale3(n, n, n)
def cd(self, ccs, delta=0.001):
assert isinstance(ccs, CCS)
d1 = self.sub(ccs.scalar(delta))
d2 = self.add(ccs.scalar(delta))
return CD(self.v3(), d2.sub(d1))
# Spherical Coordinate System
class SCS:
@classmethod
def V3(cls, v3):
assert isinstance(v3, lia.Vector3)
r = v3.length()
dh = Rad.mapRadDeg(math.atan2(v3.y, v3.x))
dv = Rad.mapRadDeg(math.atan2(v3.z, v3.v2().length()))
return SCS(r, dh, dv)
def __init__(self, r=1.0, dh=0.0, dv=0.0):
self.r = r
self.dh = dh
self.dv = dv
def toTuple(self):
return (self.r, self.dh, self.dv)
def toString(self):
return '(%.1f, %.1f, %.1f)' % self.toTuple()
def __repr__(self):
return '<{0}: {1}>'.format('SCS', self.toString())
def v3(self):
turnY = Deg(self.dv).rev().turnY()
turnZ = Deg(self.dh).turnZ()
return lia.Vector3(self.r, 0, 0).projection(turnZ.mul(turnY))
def add3(self, r=1.0, dh=0.0, dv=0.0):
return SCS(self.r + r, self.dh + dh, self.dv + dv)
def add(self, scs):
assert isinstance(scs, SCS)
return SCS(self.r + scs.r, self.dh + scs.dh, self.dv + scs.dv)
def sub(self, scs):
assert isinstance(scs, SCS)
return SCS(self.r - scs.r, self.dh - scs.dh, self.dv - scs.dv)
def scale3(self, r=1.0, dh=1.0, dv=1.0):
return SCS(self.r * r, self.dh * dh, self.dv * dv)
def scalar(self, n=1.0):
return self.scale3(n, n, n)
def cd(self, scs, delta=0.001):
assert isinstance(scs, SCS)
d1 = self.sub(scs.scalar(delta))
d2 = self.add(scs.scalar(delta))
return CD(self.v3(), d2.sub(d1))
# Polar Coordinate System
class Polar:
@classmethod
def V2(cls, v2):
assert isinstance(v2, lia.Vector2)
r = v2.length()
d = Rad.mapRadDeg(math.atan2(v2.y, v2.x))
        return Polar(r, d)
def __init__(self, r=1.0, d=0.0):
self.r = r # 0.0 ..
self.d = d # 0 .. 360
def v2(self):
return Deg(self.d).v2().scalar(self.r)
def z(self, z=0.0):
return PolarZ(self, z)
class PolarZ:
@classmethod
def V3(cls, v3):
assert isinstance(v3, lia.Vector3)
return PolarZ(lia.Vector2(v3.x, v3.y), v3.z)
@classmethod
def RDZ(cls, r=1.0, d=0.0, z=0.0):
return PolarZ(Polar(r, d), z)
def __init__(self, p, z=0.0):
assert isinstance(p, Polar)
self.p = p
self.z = z
def v3(self):
v2 = self.p.v2()
return lia.Vector3(v2.x, v2.y, self.z)
class PolarSquare:
def __init__(self, ir=0.0, d=0):
if d < 0:
d = 8 - (-d % 8)
d %= 8
        self.r = ir # radius
        self.d = d # angle: 0 -> 0*PI, 4 -> 1*PI, 8 -> 2*PI
def polar(self):
r = self.r if self.d % 2 == 0 else self.r * const.r2
return Polar(r, self.d * 45)
def v2(self):
return self.polar().v2()
def z(self, z=0.0):
return PolarSquareZ(self, z)
class PolarSquareZ:
def __init__(self, ps, z=0.0):
assert isinstance(ps, PolarSquare)
self.ps = ps
self.z = z
def v3(self):
v2 = self.ps.v2()
return lia.Vector3(v2.x, v2.y, self.z)
class empty:
polar = Polar()
polarZ = PolarZ.RDZ()
ccs = CCS()
scs = SCS()
def TestDeg():
name = 'Deg(30)'
deg = Deg(30)
print('%s: %s' % (name, deg))
print('%s.cos(): %.1f' % (name, deg.cos()))
print('%s.sin(): %.1f' % (name, deg.sin()))
print('%s.tan(): %.1f' % (name, deg.tan()))
print('%s.turn(): %s' % (name, deg.turn()))
print('%s.turnX(): %s' % (name, deg.turnX()))
print('%s.turnY(): %s' % (name, deg.turnY()))
print('%s.turnZ(): %s' % (name, deg.turnZ()))
def TestCCS():
fn = lambda v: (v, CCS.V3(v), CCS.V3(v).v3())
print('V3 -> CCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(1, 0, 0))))
print('V3 -> CCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(0, 1, 0))))
print('V3 -> CCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(0, 0, 1))))
print('V3 -> CCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(1, 1, 1))))
def TestSCS():
fn = lambda v: (v, SCS.V3(v), SCS.V3(v).v3())
print('V3 -> SCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(1, 0, 0))))
print('V3 -> SCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(0, 1, 0))))
print('V3 -> SCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(0, 0, 1))))
print('V3 -> SCS -> V3: %s -> %s -> %s' % fn((lia.Vector3(1, 1, 1))))
if __name__ == '__main__':
print('unit.py __main__ start')
TestDeg()
TestCCS()
TestSCS()
print('unit.py __main__ end')
|
[
"math.tan",
"math.cos",
"math.sin",
"math.atan2"
] |
[((1259, 1275), 'math.cos', 'math.cos', (['self.n'], {}), '(self.n)\n', (1267, 1275), False, 'import math\n'), ((1319, 1335), 'math.sin', 'math.sin', (['self.n'], {}), '(self.n)\n', (1327, 1335), False, 'import math\n'), ((1371, 1387), 'math.tan', 'math.tan', (['self.n'], {}), '(self.n)\n', (1379, 1387), False, 'import math\n'), ((451, 467), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (461, 467), False, 'import math\n'), ((531, 553), 'math.atan2', 'math.atan2', (['v2.y', 'v2.x'], {}), '(v2.y, v2.x)\n', (541, 553), False, 'import math\n'), ((5008, 5030), 'math.atan2', 'math.atan2', (['v3.y', 'v3.x'], {}), '(v3.y, v3.x)\n', (5018, 5030), False, 'import math\n'), ((6800, 6822), 'math.atan2', 'math.atan2', (['v3.y', 'v3.x'], {}), '(v3.y, v3.x)\n', (6810, 6822), False, 'import math\n'), ((8345, 8367), 'math.atan2', 'math.atan2', (['v2.y', 'v2.x'], {}), '(v2.y, v2.x)\n', (8355, 8367), False, 'import math\n')]
|
'''
FILENAME: pix2pix_data.py
AUTHORS: <NAME>
START DATE: Friday February 25th 2022
CONTACT: <EMAIL>
INFO: preparing data for training with pix2pix model
'''
import sys
from matplotlib.pyplot import bar_label
sys.path.append('/data1/shaohua/code/GANToy')
import glob
import os
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from PIL import Image
from options.train_options import args_option
class Pix2PixDataset(Dataset):
def __init__(self, args, transforms_=None, mode='train'):
self.transform = transforms_
self.args = args
self.filelist = sorted(glob.glob(os.path.join(args.dataroot, args.dataset, mode, '*.jpg')))
def __len__(self):
return len(self.filelist)
def __getitem__(self, index):
img = Image.open(self.filelist[index])
w, h = img.size
img_A = img.crop((0, 0, w // 2, h))
img_B = img.crop((w // 2, 0, w, h))
if self.args.which_direction != 'AtoB':
img_A, img_B = img_B, img_A
# data augmentation
if np.random.random() < 0.5:
img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], 'RGB')
img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], 'RGB')
img_A = self.transform(img_A)
img_B = self.transform(img_B)
return {'A': img_A, 'B': img_B}
if __name__ == '__main__':
from torch.utils.data import DataLoader
args = args_option()
trans = transforms.Compose([
transforms.Resize((args.image_size, args.image_size), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
datasets = Pix2PixDataset(args, trans)
dataloader = DataLoader(datasets, batch_size=1)
for index, item in enumerate(dataloader):
img_A = item['A']
img_B = item['B']
torchvision.utils.save_image(img_A[0], 'img_A_{}.png'.format(index))
if index >= 10:
break
|
[
"sys.path.append",
"options.train_options.args_option",
"torch.utils.data.DataLoader",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"numpy.random.random",
"numpy.array",
"torchvision.transforms.Normalize",
"os.path.join",
"torchvision.transforms.Resize"
] |
[((249, 294), 'sys.path.append', 'sys.path.append', (['"""/data1/shaohua/code/GANToy"""'], {}), "('/data1/shaohua/code/GANToy')\n", (264, 294), False, 'import sys\n'), ((1533, 1546), 'options.train_options.args_option', 'args_option', ([], {}), '()\n', (1544, 1546), False, 'from options.train_options import args_option\n'), ((1821, 1855), 'torch.utils.data.DataLoader', 'DataLoader', (['datasets'], {'batch_size': '(1)'}), '(datasets, batch_size=1)\n', (1831, 1855), False, 'from torch.utils.data import DataLoader\n'), ((885, 917), 'PIL.Image.open', 'Image.open', (['self.filelist[index]'], {}), '(self.filelist[index])\n', (895, 917), False, 'from PIL import Image\n'), ((1160, 1178), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1176, 1178), True, 'import numpy as np\n'), ((1589, 1657), 'torchvision.transforms.Resize', 'transforms.Resize', (['(args.image_size, args.image_size)', 'Image.BICUBIC'], {}), '((args.image_size, args.image_size), Image.BICUBIC)\n', (1606, 1657), True, 'import torchvision.transforms as transforms\n'), ((1667, 1688), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1686, 1688), True, 'import torchvision.transforms as transforms\n'), ((1698, 1752), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1718, 1752), True, 'import torchvision.transforms as transforms\n'), ((718, 774), 'os.path.join', 'os.path.join', (['args.dataroot', 'args.dataset', 'mode', '"""*.jpg"""'], {}), "(args.dataroot, args.dataset, mode, '*.jpg')\n", (730, 774), False, 'import os\n'), ((1222, 1237), 'numpy.array', 'np.array', (['img_A'], {}), '(img_A)\n', (1230, 1237), True, 'import numpy as np\n'), ((1294, 1309), 'numpy.array', 'np.array', (['img_B'], {}), '(img_B)\n', (1302, 1309), True, 'import numpy as np\n')]
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("READ")
process.source = cms.Source("DQMRootSource",
fileNames = cms.untracked.vstring("file:dqm_merged_file1_file3_file2_filterOnRun1.root"))
process.out = cms.OutputModule("DQMRootOutputModule",
fileName = cms.untracked.string("dqm_merged_file1_file3_file2_filterOnRun1_copy.root"))
process.e = cms.EndPath(process.out)
process.add_(cms.Service("DQMStore"))
|
[
"FWCore.ParameterSet.Config.untracked.vstring",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.EndPath",
"FWCore.ParameterSet.Config.Service",
"FWCore.ParameterSet.Config.Process"
] |
[((52, 71), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""READ"""'], {}), "('READ')\n", (63, 71), True, 'import FWCore.ParameterSet.Config as cms\n'), ((424, 448), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['process.out'], {}), '(process.out)\n', (435, 448), True, 'import FWCore.ParameterSet.Config as cms\n'), ((463, 486), 'FWCore.ParameterSet.Config.Service', 'cms.Service', (['"""DQMStore"""'], {}), "('DQMStore')\n", (474, 486), True, 'import FWCore.ParameterSet.Config as cms\n'), ((158, 234), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""file:dqm_merged_file1_file3_file2_filterOnRun1.root"""'], {}), "('file:dqm_merged_file1_file3_file2_filterOnRun1.root')\n", (179, 234), True, 'import FWCore.ParameterSet.Config as cms\n'), ((333, 408), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""dqm_merged_file1_file3_file2_filterOnRun1_copy.root"""'], {}), "('dqm_merged_file1_file3_file2_filterOnRun1_copy.root')\n", (353, 408), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import unittest
from p4_execution.ex5_execute import TpfServer
from p8_test.test_local import TestDataUTS
class Ts23Test(unittest.TestCase):
def setUp(self) -> None:
self.tpf_server = TpfServer()
self.test_data = TestDataUTS()
self.test_data.add_fields([('EBW000', 28), 'CE1$UID'], 'EB0EB')
self.test_data.add_fields(['@HAALC'], 'GLOBAL')
self.test_data.add_all_regs()
def test_ts23(self):
test_data = self.tpf_server.run('TS23', self.test_data)
self.assertEqual('00000001000000020000000300000004000000050000000600000007', test_data.get_field('EBW000'))
self.assertEqual('E5E7', test_data.get_field('@HAALC'))
self.assertEqual('44', test_data.get_field('CE1$UID'))
self.assertEqual(1, test_data.output.regs['R11'])
self.assertEqual(1, test_data.output.regs['R12'])
self.assertEqual(2, test_data.output.regs['R13'])
def test_prima_1f(self):
self.test_data.set_field('WA0PHA', bytes([0x02]))
test_data = self.tpf_server.run('TS23', self.test_data)
self.assertEqual(4, test_data.output.regs['R11'])
def test_prima_1b(self):
self.test_data.set_field('WA0PHA', bytes([0x03]))
test_data = self.tpf_server.run('TS23', self.test_data)
self.assertEqual(5, test_data.output.regs['R11'])
def test_mcpck(self):
self.test_data.partition = 'LA'
test_data = self.tpf_server.run('TS23', self.test_data)
self.assertEqual(2, test_data.output.regs['R12'])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"p4_execution.ex5_execute.TpfServer",
"p8_test.test_local.TestDataUTS"
] |
[((1565, 1580), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1578, 1580), False, 'import unittest\n'), ((199, 210), 'p4_execution.ex5_execute.TpfServer', 'TpfServer', ([], {}), '()\n', (208, 210), False, 'from p4_execution.ex5_execute import TpfServer\n'), ((236, 249), 'p8_test.test_local.TestDataUTS', 'TestDataUTS', ([], {}), '()\n', (247, 249), False, 'from p8_test.test_local import TestDataUTS\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of common learning rate schedules."""
import numpy as np
import tensorflow as tf
def exponential_decay_with_burnin(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
burnin_learning_rate=0.0,
burnin_steps=0):
"""Exponential decay schedule with burn-in period.
In this schedule, learning rate is fixed at burnin_learning_rate
for a fixed period, before transitioning to a regular exponential
decay schedule.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay
learning rate.
burnin_learning_rate: initial learning rate during burn-in period. If
0.0 (which is the default), then the burn-in learning rate is simply
set to learning_rate_base.
burnin_steps: number of steps to use burnin learning rate.
Returns:
a (scalar) float tensor representing learning rate
"""
if burnin_learning_rate == 0:
burnin_learning_rate = learning_rate_base
post_burnin_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=True)
return tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),
tf.constant(burnin_learning_rate),
post_burnin_learning_rate)
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0):
"""Cosine decay schedule with warm up period.
Cosine annealing learning rate as described in:
Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.
ICLR 2017. https://arxiv.org/abs/1608.03983
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
Args:
global_step: int64 (scalar) tensor representing global step.
learning_rate_base: base learning rate.
total_steps: total number of training steps.
warmup_learning_rate: initial learning rate for warm up.
warmup_steps: number of warmup steps.
Returns:
a (scalar) float tensor representing learning rate.
Raises:
ValueError: if warmup_learning_rate is larger than learning_rate_base,
or if warmup_steps is larger than total_steps.
"""
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger '
'or equal to warmup_learning_rate.')
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to '
'warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (
1 + tf.cos(np.pi * (tf.cast(global_step, tf.float32) - warmup_steps
) / float(total_steps - warmup_steps)))
if warmup_steps > 0:
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
pre_cosine_learning_rate = slope * tf.cast(
global_step, tf.float32) + warmup_learning_rate
learning_rate = tf.where(
tf.less(tf.cast(global_step, tf.int32), warmup_steps),
pre_cosine_learning_rate,
learning_rate)
return learning_rate
def manual_stepping(global_step, boundaries, rates):
"""Manually stepped learning rate schedule.
This function provides fine grained control over learning rates. One must
specify a sequence of learning rates as well as a set of integer steps
at which the current learning rate must transition to the next. For example,
if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning
rate returned by this function is .1 for global_step=0,...,4, .01 for
global_step=5...9, and .001 for global_step=10 and onward.
Args:
global_step: int64 (scalar) tensor representing global step.
boundaries: a list of global steps at which to switch learning
rates. This list is assumed to consist of increasing positive integers.
rates: a list of (float) learning rates corresponding to intervals between
the boundaries. The length of this list must be exactly
len(boundaries) + 1.
Returns:
a (scalar) float tensor representing learning rate
Raises:
ValueError: if one of the following checks fails:
1. boundaries is a strictly increasing list of positive integers
2. len(rates) == len(boundaries) + 1
"""
if any([b < 0 for b in boundaries]) or any(
[not isinstance(b, int) for b in boundaries]):
raise ValueError('boundaries must be a list of positive integers')
if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
raise ValueError('Entries in boundaries must be strictly increasing.')
if any([not isinstance(r, float) for r in rates]):
raise ValueError('Learning rates must be floats')
if len(rates) != len(boundaries) + 1:
raise ValueError('Number of provided learning rates must exceed '
'number of boundary points by exactly 1.')
if not boundaries: return tf.constant(rates[0])
step_boundaries = tf.constant(boundaries, tf.int32)
num_boundaries = len(boundaries)
learning_rates = tf.constant(rates, tf.float32)
index = tf.reduce_min(
tf.where(
# Casting global step to tf.int32 is dangerous, but necessary to be
# compatible with TPU.
tf.greater(step_boundaries, tf.cast(global_step, tf.int32)),
tf.constant(range(num_boundaries), dtype=tf.int32),
tf.constant([num_boundaries] * num_boundaries, dtype=tf.int32)))
return tf.reduce_sum(learning_rates * tf.one_hot(index, len(rates),
dtype=tf.float32))
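# A minimal usage sketch of manual_stepping, assuming TF 1.x graph mode
# (consistent with the tf.train.* calls above); the step values and rates
# below are illustrative only.
if __name__ == '__main__':
  with tf.Graph().as_default():
    step_placeholder = tf.placeholder(tf.int64, shape=[])
    lr = manual_stepping(step_placeholder, boundaries=[5, 10],
                         rates=[0.1, 0.01, 0.001])
    with tf.Session() as sess:
      for step in [0, 4, 5, 9, 10, 20]:
        # Expect 0.1 for steps 0-4, 0.01 for steps 5-9, 0.001 from step 10 on.
        print(step, sess.run(lr, feed_dict={step_placeholder: step}))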
|
[
"tensorflow.cast",
"tensorflow.train.exponential_decay",
"tensorflow.constant"
] |
[((2093, 2227), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate_base', 'global_step', 'learning_rate_decay_steps', 'learning_rate_decay_factor'], {'staircase': '(True)'}), '(learning_rate_base, global_step,\n learning_rate_decay_steps, learning_rate_decay_factor, staircase=True)\n', (2119, 2227), True, 'import tensorflow as tf\n'), ((6243, 6276), 'tensorflow.constant', 'tf.constant', (['boundaries', 'tf.int32'], {}), '(boundaries, tf.int32)\n', (6254, 6276), True, 'import tensorflow as tf\n'), ((6331, 6361), 'tensorflow.constant', 'tf.constant', (['rates', 'tf.float32'], {}), '(rates, tf.float32)\n', (6342, 6361), True, 'import tensorflow as tf\n'), ((2354, 2387), 'tensorflow.constant', 'tf.constant', (['burnin_learning_rate'], {}), '(burnin_learning_rate)\n', (2365, 2387), True, 'import tensorflow as tf\n'), ((6201, 6222), 'tensorflow.constant', 'tf.constant', (['rates[0]'], {}), '(rates[0])\n', (6212, 6222), True, 'import tensorflow as tf\n'), ((2288, 2318), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (2295, 2318), True, 'import tensorflow as tf\n'), ((2320, 2345), 'tensorflow.constant', 'tf.constant', (['burnin_steps'], {}), '(burnin_steps)\n', (2331, 2345), True, 'import tensorflow as tf\n'), ((6657, 6719), 'tensorflow.constant', 'tf.constant', (['([num_boundaries] * num_boundaries)'], {'dtype': 'tf.int32'}), '([num_boundaries] * num_boundaries, dtype=tf.int32)\n', (6668, 6719), True, 'import tensorflow as tf\n'), ((4154, 4186), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.float32'], {}), '(global_step, tf.float32)\n', (4161, 4186), True, 'import tensorflow as tf\n'), ((4265, 4295), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (4272, 4295), True, 'import tensorflow as tf\n'), ((6552, 6582), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (6559, 6582), True, 'import tensorflow as tf\n'), ((3908, 3940), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.float32'], {}), '(global_step, tf.float32)\n', (3915, 3940), True, 'import tensorflow as tf\n')]
|
from collections import Counter
from io import BytesIO
import base64
from PIL import Image, ImageDraw
import numpy as np
from kmeans import KMeans
def rgb2hex(rgb):
return '#{:02x}{:02x}{:02x}'.format(*[int(x) for x in rgb])
def receive_image(buf):
return Image.open(BytesIO(buf))
def image_to_array(img, img_size):
denom = np.sqrt(np.product(img.size[:2]) / img_size**2)
im = np.asarray(img.resize(
(np.array(img.size) / denom).astype(int)), dtype='int32')
X = im.reshape((im.shape[0]*im.shape[1], 3))
return X, im.shape
def image_resize(img, img_size):
denom = np.sqrt(max(1, np.product(img.size[:2]) / img_size**2))
im = np.asarray(img.resize(
(np.array(img.size) / denom).astype(int)), dtype='int32')
img = img.resize((np.array(img.size) / denom).astype(int))
buf = BytesIO()
img.save(buf, format='jpeg')
return base64.b64encode(buf.getvalue()).decode('utf')
def get_clusters(X, algo, n_clusters):
if algo == 'KMeans':
clf = KMeans(n_clusters=n_clusters)
else:
clf = KMeans(n_clusters=n_clusters)
clf.fit(X)
return clf
def get_colors_from_clf(clf, X):
clf_labels = clf.predict(X)
colors = []
hist = []
items = sorted(Counter(clf_labels).items())
for k, v in items:
colors.append(X[clf_labels == k].mean(axis=0).astype(int).tolist())
hist.append(v)
return hist, colors
def get_image_from_clf(clf, colors, X, dims):
recoded = np.array([
[int(y) for y in colors[x]]
for x in clf.predict(X)]).reshape(dims)
buf = BytesIO()
Image.fromarray(np.uint8(recoded)).save(buf, format='jpeg')
return base64.b64encode(buf.getvalue()).decode('utf')
def generate_image(w, h, color):
img_io = BytesIO()
Image.new('RGB', (w, h), color).save(img_io, 'JPEG', quality=90)
img_io.seek(0)
return img_io
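# A rough end-to-end sketch, assuming a local file named 'photo.jpg', a NumPy
# version that still provides np.product, and that kmeans.KMeans exposes the
# fit/predict interface used above.
if __name__ == '__main__':
    with open('photo.jpg', 'rb') as f:
        img = receive_image(f.read())
    X, dims = image_to_array(img, img_size=100)
    clf = get_clusters(X, 'KMeans', n_clusters=5)
    hist, colors = get_colors_from_clf(clf, X)
    # print each dominant color as hex together with its pixel count
    print(list(zip(hist, [rgb2hex(c) for c in colors])))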
|
[
"io.BytesIO",
"PIL.Image.new",
"numpy.uint8",
"numpy.product",
"numpy.array",
"kmeans.KMeans",
"collections.Counter"
] |
[((838, 847), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (845, 847), False, 'from io import BytesIO\n'), ((1597, 1606), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1604, 1606), False, 'from io import BytesIO\n'), ((1778, 1787), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1785, 1787), False, 'from io import BytesIO\n'), ((281, 293), 'io.BytesIO', 'BytesIO', (['buf'], {}), '(buf)\n', (288, 293), False, 'from io import BytesIO\n'), ((1020, 1049), 'kmeans.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (1026, 1049), False, 'from kmeans import KMeans\n'), ((1074, 1103), 'kmeans.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (1080, 1103), False, 'from kmeans import KMeans\n'), ((352, 376), 'numpy.product', 'np.product', (['img.size[:2]'], {}), '(img.size[:2])\n', (362, 376), True, 'import numpy as np\n'), ((1792, 1823), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', 'color'], {}), "('RGB', (w, h), color)\n", (1801, 1823), False, 'from PIL import Image, ImageDraw\n'), ((625, 649), 'numpy.product', 'np.product', (['img.size[:2]'], {}), '(img.size[:2])\n', (635, 649), True, 'import numpy as np\n'), ((1253, 1272), 'collections.Counter', 'Counter', (['clf_labels'], {}), '(clf_labels)\n', (1260, 1272), False, 'from collections import Counter\n'), ((1627, 1644), 'numpy.uint8', 'np.uint8', (['recoded'], {}), '(recoded)\n', (1635, 1644), True, 'import numpy as np\n'), ((787, 805), 'numpy.array', 'np.array', (['img.size'], {}), '(img.size)\n', (795, 805), True, 'import numpy as np\n'), ((433, 451), 'numpy.array', 'np.array', (['img.size'], {}), '(img.size)\n', (441, 451), True, 'import numpy as np\n'), ((707, 725), 'numpy.array', 'np.array', (['img.size'], {}), '(img.size)\n', (715, 725), True, 'import numpy as np\n')]
|
#!/usr/local/bin/python3
import RPi.GPIO as GPIO
import time
class Button(object):
'''
Button class represents a button.
Has one property to read the current value.
'''
def __init__(self, pin):
self.pin = pin
        # Have to set pull_up_down so the button input is pulled high when the switch is open
GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    # @property in Python allows you to call a method like an attribute of a class
# Basically functions like a getter
@property
def value(self):
return 1 - GPIO.input(self.pin)
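# A small usage sketch, assuming BCM pin numbering and a button wired between
# GPIO17 and ground (which matches the internal pull-up configured above).
if __name__ == '__main__':
    GPIO.setmode(GPIO.BCM)
    button = Button(17)
    try:
        while True:
            # value is 1 while the button is pressed, 0 otherwise
            print(button.value)
            time.sleep(0.2)
    finally:
        GPIO.cleanup()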
|
[
"RPi.GPIO.setup",
"RPi.GPIO.input"
] |
[((307, 362), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (317, 362), True, 'import RPi.GPIO as GPIO\n'), ((543, 563), 'RPi.GPIO.input', 'GPIO.input', (['self.pin'], {}), '(self.pin)\n', (553, 563), True, 'import RPi.GPIO as GPIO\n')]
|
#!/usr/bin/env python
# encoding: utf-8
'''
@license: (C) Copyright 2013-2020, Node Supply Chain Manager Corporation Limited.
@time: 2021/5/8 16:55
@file: bert_model.py
@author: baidq
@Software: PyCharm
@desc:
'''
from bert4keras.backend import keras,set_gelu
from bert4keras.models import build_transformer_model
from bert4keras.optimizers import Adam
set_gelu('tanh')
def textcnn(inputs, kernel_initializer):
"""
    TextCNN implemented with Keras
:param inputs:
:param kernel_initializer:
:return:
"""
    # convolution kernel sizes: 3, 4, 5
cnn1 = keras.layers.Conv1D(256,
3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=kernel_initializer)(inputs) # shape=[batch_size,maxlen-2,256]
cnn1 = keras.layers.GlobalMaxPooling1D()(cnn1)
cnn2 = keras.layers.Conv1D(256,
4,
strides=1,
padding='same',
activation='relu',
kernel_initializer=kernel_initializer)(inputs)
cnn2 = keras.layers.GlobalMaxPooling1D()(cnn2)
cnn3 = keras.layers.Conv1D(256,
5,
strides=1,
padding='same',
kernel_initializer=kernel_initializer)(inputs)
cnn3 = keras.layers.GlobalMaxPooling1D()(cnn3)
output = keras.layers.concatenate([cnn1, cnn2, cnn3],axis=-1)
output = keras.layers.Dropout(0.2)(output)
return output
def build_bert_model(config_path, checkpoint_path, class_nums):
"""
    Build a BERT-based model for medical intent recognition
:param config_path:
:param checkpoint_path:
:param class_nums:
:return:
"""
    # Load the pretrained BERT model
bert = build_transformer_model(
config_path=config_path,
checkpoint_path=checkpoint_path,
model='bert',
return_keras_model=False
)
    # Extract the [CLS] token
cls_features = keras.layers.Lambda(
        lambda x: x[:,0], # the first column of every row (the [CLS] vector)
name='cls-token')(bert.model.output) #shape=[batch_size,768]
    # Extract every token from the second to the second-to-last (drop [CLS] and [SEP])
all_token_embedding = keras.layers.Lambda(
lambda x: x[:,1:-1],
name='all-token')(bert.model.output) #shape=[batch_size,maxlen-2,768]
cnn_features = textcnn(all_token_embedding, bert.initializer) #shape=[batch_size,cnn_output_dim]
    # Concatenate the features
concat_features = keras.layers.concatenate([cls_features, cnn_features], axis=-1)
dense = keras.layers.Dense(units=512,
activation='relu',
kernel_initializer=bert.initializer)(concat_features)
output = keras.layers.Dense(units=class_nums,
activation='softmax',
kernel_initializer=bert.initializer)(dense)
model = keras.models.Model(bert.model.input, output)
print(model.summary())
return model
if __name__ == '__main__':
config_path = 'E:/bert_weight_files/bert_wwm/bert_config.json'
checkpoint_path = 'E:/bert_weight_files/bert_wwm/bert_model.ckpt'
class_nums = 13
build_bert_model(config_path, checkpoint_path, class_nums)
|
[
"bert4keras.models.build_transformer_model",
"bert4keras.backend.keras.layers.Conv1D",
"bert4keras.backend.set_gelu",
"bert4keras.backend.keras.layers.concatenate",
"bert4keras.backend.keras.models.Model",
"bert4keras.backend.keras.layers.Lambda",
"bert4keras.backend.keras.layers.Dense",
"bert4keras.backend.keras.layers.GlobalMaxPooling1D",
"bert4keras.backend.keras.layers.Dropout"
] |
[((355, 371), 'bert4keras.backend.set_gelu', 'set_gelu', (['"""tanh"""'], {}), "('tanh')\n", (363, 371), False, 'from bert4keras.backend import keras, set_gelu\n'), ((1540, 1593), 'bert4keras.backend.keras.layers.concatenate', 'keras.layers.concatenate', (['[cnn1, cnn2, cnn3]'], {'axis': '(-1)'}), '([cnn1, cnn2, cnn3], axis=-1)\n', (1564, 1593), False, 'from bert4keras.backend import keras, set_gelu\n'), ((1880, 2006), 'bert4keras.models.build_transformer_model', 'build_transformer_model', ([], {'config_path': 'config_path', 'checkpoint_path': 'checkpoint_path', 'model': '"""bert"""', 'return_keras_model': '(False)'}), "(config_path=config_path, checkpoint_path=\n checkpoint_path, model='bert', return_keras_model=False)\n", (1903, 2006), False, 'from bert4keras.models import build_transformer_model\n'), ((2524, 2587), 'bert4keras.backend.keras.layers.concatenate', 'keras.layers.concatenate', (['[cls_features, cnn_features]'], {'axis': '(-1)'}), '([cls_features, cnn_features], axis=-1)\n', (2548, 2587), False, 'from bert4keras.backend import keras, set_gelu\n'), ((2960, 3004), 'bert4keras.backend.keras.models.Model', 'keras.models.Model', (['bert.model.input', 'output'], {}), '(bert.model.input, output)\n', (2978, 3004), False, 'from bert4keras.backend import keras, set_gelu\n'), ((537, 653), 'bert4keras.backend.keras.layers.Conv1D', 'keras.layers.Conv1D', (['(256)', '(3)'], {'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': 'kernel_initializer'}), "(256, 3, strides=1, padding='same', activation='relu',\n kernel_initializer=kernel_initializer)\n", (556, 653), False, 'from bert4keras.backend import keras, set_gelu\n'), ((858, 891), 'bert4keras.backend.keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPooling1D', ([], {}), '()\n', (889, 891), False, 'from bert4keras.backend import keras, set_gelu\n'), ((910, 1026), 'bert4keras.backend.keras.layers.Conv1D', 'keras.layers.Conv1D', (['(256)', '(4)'], {'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': 'kernel_initializer'}), "(256, 4, strides=1, padding='same', activation='relu',\n kernel_initializer=kernel_initializer)\n", (929, 1026), False, 'from bert4keras.backend import keras, set_gelu\n'), ((1197, 1230), 'bert4keras.backend.keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPooling1D', ([], {}), '()\n', (1228, 1230), False, 'from bert4keras.backend import keras, set_gelu\n'), ((1249, 1347), 'bert4keras.backend.keras.layers.Conv1D', 'keras.layers.Conv1D', (['(256)', '(5)'], {'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': 'kernel_initializer'}), "(256, 5, strides=1, padding='same', kernel_initializer=\n kernel_initializer)\n", (1268, 1347), False, 'from bert4keras.backend import keras, set_gelu\n'), ((1486, 1519), 'bert4keras.backend.keras.layers.GlobalMaxPooling1D', 'keras.layers.GlobalMaxPooling1D', ([], {}), '()\n', (1517, 1519), False, 'from bert4keras.backend import keras, set_gelu\n'), ((1606, 1631), 'bert4keras.backend.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1626, 1631), False, 'from bert4keras.backend import keras, set_gelu\n'), ((2080, 2136), 'bert4keras.backend.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x[:, 0])'], {'name': '"""cls-token"""'}), "(lambda x: x[:, 0], name='cls-token')\n", (2099, 2136), False, 'from bert4keras.backend import keras, set_gelu\n'), ((2260, 2319), 'bert4keras.backend.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x[:, 1:-1])'], 
{'name': '"""all-token"""'}), "(lambda x: x[:, 1:-1], name='all-token')\n", (2279, 2319), False, 'from bert4keras.backend import keras, set_gelu\n'), ((2601, 2691), 'bert4keras.backend.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(512)', 'activation': '"""relu"""', 'kernel_initializer': 'bert.initializer'}), "(units=512, activation='relu', kernel_initializer=bert.\n initializer)\n", (2619, 2691), False, 'from bert4keras.backend import keras, set_gelu\n'), ((2780, 2879), 'bert4keras.backend.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': 'class_nums', 'activation': '"""softmax"""', 'kernel_initializer': 'bert.initializer'}), "(units=class_nums, activation='softmax',\n kernel_initializer=bert.initializer)\n", (2798, 2879), False, 'from bert4keras.backend import keras, set_gelu\n')]
|
def pretty_dump(label, o):
import pprint
return """
<{}>
{}
</{}>
""".format(label, pprint.pformat(o, indent=4), label)
|
[
"pprint.pformat"
] |
[((92, 119), 'pprint.pformat', 'pprint.pformat', (['o'], {'indent': '(4)'}), '(o, indent=4)\n', (106, 119), False, 'import pprint\n')]
|
from . core import Literal
from collections import defaultdict
def gen_args(args):
return tuple(chr(ord('A') + arg.number) for arg in args)
def generate_program(model):
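    """Convert the atoms of a solver model into a logic program.

    Returns (clauses, before, min_clause): each clause is a (head Literal,
    frozenset of body Literals) pair built from the head_literal,
    body_literal and direction_ atoms found in the model.
    """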
before = defaultdict(set)
min_clause = defaultdict(lambda: 0)
directions = defaultdict(lambda: defaultdict(lambda: '?'))
clause_id_to_body = defaultdict(set)
clause_id_to_head = {}
for atom in model:
if atom.name == 'body_literal':
clause_id = atom.arguments[0].number
predicate = atom.arguments[1].name
arity = atom.arguments[2].number
arguments = gen_args(atom.arguments[3].arguments)
body_literal = (predicate, arguments, arity)
clause_id_to_body[clause_id].add(body_literal)
elif atom.name == 'head_literal':
clause_id = atom.arguments[0].number
predicate = atom.arguments[1].name
arity = atom.arguments[2].number
args = atom.arguments[3].arguments
arguments = gen_args(atom.arguments[3].arguments)
head_literal = (predicate, arguments, arity)
clause_id_to_head[clause_id] = head_literal
elif atom.name == 'direction_':
pred_name = atom.arguments[0].name
arg_index = atom.arguments[1].number
arg_dir_str = atom.arguments[2].name
if arg_dir_str == 'in':
arg_dir = '+'
elif arg_dir_str == 'out':
arg_dir = '-'
else:
raise Exception(f'Unrecognised argument direction "{arg_dir_str}"')
directions[pred_name][arg_index] = arg_dir
elif atom.name == 'before':
clause1 = atom.arguments[0].number
clause2 = atom.arguments[1].number
before[clause1].add(clause2)
elif atom.name == 'min_clause':
clause = atom.arguments[0].number
min_clause_num = atom.arguments[1].number
min_clause[clause] = max(min_clause[clause], min_clause_num)
clauses = []
for clause_id in clause_id_to_head:
(head_pred, head_args, head_arity) = clause_id_to_head[clause_id]
head_modes = tuple(directions[head_pred][i] for i in range(head_arity))
head = Literal(head_pred, head_args, head_modes)
body = set()
for (body_pred, body_args, body_arity) in clause_id_to_body[clause_id]:
body_modes = tuple(directions[body_pred][i] for i in range(body_arity))
body.add(Literal(body_pred, body_args, body_modes))
body = frozenset(body)
clauses.append((head, body))
clauses = tuple(clauses)
return (clauses, before, min_clause)
|
[
"collections.defaultdict"
] |
[((192, 208), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (203, 208), False, 'from collections import defaultdict\n'), ((226, 249), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (237, 249), False, 'from collections import defaultdict\n'), ((336, 352), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (347, 352), False, 'from collections import defaultdict\n'), ((286, 311), 'collections.defaultdict', 'defaultdict', (["(lambda : '?')"], {}), "(lambda : '?')\n", (297, 311), False, 'from collections import defaultdict\n')]
|
import pickle
import torch
from datasets import supported_datamodules
from models import supported_models
from modules.pca_base_module import PCABaseModule
from modules.cae_base_module import CAEBaseModule
from modules.aae_base_module import AAEBaseModule
from modules.vae_base_module import VAEBaseModule
from utils import tools, supported_preprocessing_transforms
from utils.dtypes import *
def load_modules(paths_to_archived_models: List[Path]) -> dict:
# Load the checkpoints for all the training modules and save them in a dictionary
module_catalog = {}
for pth in paths_to_archived_models:
model_type = pth.parent.name
model_name = pth.name
config = tools.load_config(pth / 'configuration.yaml', silent=True)
print(f'Loading state dict for: {model_type}/{model_name}')
# Unsupervising region proposal is called implicity in this line, see utils/preprocessing.py
preprocessing_transforms = supported_preprocessing_transforms[config['data-parameters']['preprocessing']]
datamodule = supported_datamodules[config['experiment-parameters']['datamodule']](
data_transforms=preprocessing_transforms,
**config['data-parameters'])
if 'PCA' in model_type:
_, pca_test_labels = datamodule.split(train=False)
with open(pth / 'fitted_model.p', 'rb') as f:
model = pickle.load(f)
module = PCABaseModule(model)
else:
ckpt_path = next(iter((pth / 'checkpoints').glob('val_*')))
datamodule.setup('test')
# Handle the various model instantiations
if 'AAE' in model_type:
model = supported_models[model_type](
in_shape=datamodule.data_shape,
latent_nodes=config['module-parameters']['latent_nodes'])
module = AAEBaseModule(model, **config['module-parameters'])
elif 'VAE' in model_type:
model = supported_models[model_type](
in_shape=datamodule.data_shape,
latent_nodes=config['module-parameters']['latent_nodes'])
module = VAEBaseModule(model, **config['module-parameters'])
elif 'CAE' in model_type:
model = supported_models[model_type](in_shape=datamodule.data_shape)
module = CAEBaseModule(model, **config['module-parameters'])
else:
raise ValueError(f'Model substring not found, got {model_type}')
# Load the state_dict into the module architecture
checkpoint = torch.load(ckpt_path)
module.load_state_dict(checkpoint['state_dict'])
if model_type not in module_catalog:
module_catalog[model_type] = {}
module_catalog[model_type][model_name] = module
if 'pca_test_labels' in locals():
return module_catalog, datamodule, pca_test_labels
return module_catalog, datamodule, -1
|
[
"utils.tools.load_config",
"modules.aae_base_module.AAEBaseModule",
"torch.load",
"modules.vae_base_module.VAEBaseModule",
"pickle.load",
"modules.pca_base_module.PCABaseModule",
"modules.cae_base_module.CAEBaseModule"
] |
[((697, 755), 'utils.tools.load_config', 'tools.load_config', (["(pth / 'configuration.yaml')"], {'silent': '(True)'}), "(pth / 'configuration.yaml', silent=True)\n", (714, 755), False, 'from utils import tools, supported_preprocessing_transforms\n'), ((2631, 2652), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (2641, 2652), False, 'import torch\n'), ((1407, 1421), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1418, 1421), False, 'import pickle\n'), ((1447, 1467), 'modules.pca_base_module.PCABaseModule', 'PCABaseModule', (['model'], {}), '(model)\n', (1460, 1467), False, 'from modules.pca_base_module import PCABaseModule\n'), ((1892, 1943), 'modules.aae_base_module.AAEBaseModule', 'AAEBaseModule', (['model'], {}), "(model, **config['module-parameters'])\n", (1905, 1943), False, 'from modules.aae_base_module import AAEBaseModule\n'), ((2191, 2242), 'modules.vae_base_module.VAEBaseModule', 'VAEBaseModule', (['model'], {}), "(model, **config['module-parameters'])\n", (2204, 2242), False, 'from modules.vae_base_module import VAEBaseModule\n'), ((2391, 2442), 'modules.cae_base_module.CAEBaseModule', 'CAEBaseModule', (['model'], {}), "(model, **config['module-parameters'])\n", (2404, 2442), False, 'from modules.cae_base_module import CAEBaseModule\n')]
|
import torch
from torch.distributions import multinomial
from matplotlib import pyplot as plt
fair_probs = torch.ones([6]) / 6
print("1\n",multinomial.Multinomial(1, fair_probs).sample())
print("2\n",multinomial.Multinomial(10, fair_probs).sample())
counts = multinomial.Multinomial(1000, fair_probs).sample()
print("3\n",counts / 1000)
counts = multinomial.Multinomial(10, fair_probs).sample((500,))
cum_counts = counts.cumsum(dim=0)
estimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)
# Set the figure size
plt.figure(figsize=(6,4.5))
# Font settings (SimHei) so that non-ASCII labels and minus signs render correctly
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
for i in range(6):
plt.plot(estimates[:, i].numpy(),
label=("P(die=" + str(i + 1) + ")"))
plt.axhline(y=0.167, color='black', linestyle='dashed')
plt.gca().set_xlabel('Number of trials')
plt.gca().set_ylabel('Estimated probability')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.axhline",
"torch.ones",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"torch.distributions.multinomial.Multinomial",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca"
] |
[((528, 556), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4.5)'}), '(figsize=(6, 4.5))\n', (538, 556), True, 'from matplotlib import pyplot as plt\n'), ((778, 833), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.167)', 'color': '"""black"""', 'linestyle': '"""dashed"""'}), "(y=0.167, color='black', linestyle='dashed')\n", (789, 833), True, 'from matplotlib import pyplot as plt\n'), ((895, 907), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (905, 907), True, 'from matplotlib import pyplot as plt\n'), ((909, 919), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (917, 919), True, 'from matplotlib import pyplot as plt\n'), ((112, 127), 'torch.ones', 'torch.ones', (['[6]'], {}), '([6])\n', (122, 127), False, 'import torch\n'), ((269, 310), 'torch.distributions.multinomial.Multinomial', 'multinomial.Multinomial', (['(1000)', 'fair_probs'], {}), '(1000, fair_probs)\n', (292, 310), False, 'from torch.distributions import multinomial\n'), ((362, 401), 'torch.distributions.multinomial.Multinomial', 'multinomial.Multinomial', (['(10)', 'fair_probs'], {}), '(10, fair_probs)\n', (385, 401), False, 'from torch.distributions import multinomial\n'), ((835, 844), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (842, 844), True, 'from matplotlib import pyplot as plt\n'), ((865, 874), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (872, 874), True, 'from matplotlib import pyplot as plt\n'), ((147, 185), 'torch.distributions.multinomial.Multinomial', 'multinomial.Multinomial', (['(1)', 'fair_probs'], {}), '(1, fair_probs)\n', (170, 185), False, 'from torch.distributions import multinomial\n'), ((209, 248), 'torch.distributions.multinomial.Multinomial', 'multinomial.Multinomial', (['(10)', 'fair_probs'], {}), '(10, fair_probs)\n', (232, 248), False, 'from torch.distributions import multinomial\n')]
|
#! /usr/bin/env python3
import csv
import requests
import glob
import os
def read_csv(file):
data = []
with open(file, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
# map the country fields
if "Country_Region" in row:
row["country"] = row["Country_Region"]
elif "Country/Region" in row:
row["country"] = row["Country/Region"]
else:
continue
data.append(row)
return data
# API endpoint
API_ENDPOINT = 'http://localhost:5000/api/reports/'
# daily covid-19 reports location
DATA_LOCATION = os.getenv("DATA_LOCATION", None)
if not DATA_LOCATION:
print("Please set DATA_LOCATION to the correct CSV files location.")
exit(1)
# parse all files in this directory
reports = list(filter(os.path.isfile, glob.glob(DATA_LOCATION + "*")))
reports.sort(key=lambda x: os.path.getmtime(x))
for report_name in reports:
data = read_csv(report_name)
if not data:
print("No data parsed! {}".format(report_name))
continue
# report date extraction
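    # e.g. "01-25-2020.csv" (month-day-year) becomes the report date "2020-01-25"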
try:
month, day, year = report_name.split('/')[-1].split('.')[0].split('-')
report_date = "{}-{}-{}".format(year, month, day)
except:
print("Cannot parse the report date {}.".format(report_name))
continue
body = {
"report_date": report_date,
"country_report": data
}
r = requests.post(url=API_ENDPOINT, json=body)
print(r.json())
|
[
"csv.DictReader",
"os.path.getmtime",
"glob.glob",
"requests.post",
"os.getenv"
] |
[((671, 703), 'os.getenv', 'os.getenv', (['"""DATA_LOCATION"""', 'None'], {}), "('DATA_LOCATION', None)\n", (680, 703), False, 'import os\n'), ((1492, 1534), 'requests.post', 'requests.post', ([], {'url': 'API_ENDPOINT', 'json': 'body'}), '(url=API_ENDPOINT, json=body)\n', (1505, 1534), False, 'import requests\n'), ((175, 199), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (189, 199), False, 'import csv\n'), ((887, 917), 'glob.glob', 'glob.glob', (["(DATA_LOCATION + '*')"], {}), "(DATA_LOCATION + '*')\n", (896, 917), False, 'import glob\n'), ((947, 966), 'os.path.getmtime', 'os.path.getmtime', (['x'], {}), '(x)\n', (963, 966), False, 'import os\n')]
|
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import unittest
import random
import string
from hfc.fabric_ca.caservice import CAService
from test.integration.utils import cli_call
with open(os.path.join(os.path.dirname(__file__),
"../fixtures/ca/enroll-csr.pem")) as f:
test_pem = f.read()
ENROLLMENT_ID = "admin"
ENROLLMENT_SECRET = "adminpw"
def get_random_username():
return ''.join(
[random.choice(string.ascii_letters + string.digits)
for n in range(9)])
class IdentityServiceTest(unittest.TestCase):
"""Test for ca module. """
def setUp(self):
self._enrollment_id = ENROLLMENT_ID
self._enrollment_secret = ENROLLMENT_SECRET
if os.getenv("CA_ADDR"):
self._ca_server_address = os.getenv("CA_ADDR")
else:
self._ca_server_address = "localhost:7054"
self.compose_file_path = os.path.normpath(
os.path.join(os.path.dirname(__file__),
"../fixtures/ca/docker-compose.yml")
)
self.start_test_env()
self._ca_service = CAService("http://" + self._ca_server_address)
id = self._enrollment_id
secret = self._enrollment_secret
self._adminEnrollment = self._ca_service.enroll(id, secret)
self._identityService = self._ca_service.newIdentityService()
def tearDown(self):
self.shutdown_test_env()
def start_test_env(self):
cli_call(["docker-compose", "-f", self.compose_file_path, "up", "-d"])
time.sleep(5)
def shutdown_test_env(self):
cli_call(["docker-compose", "-f", self.compose_file_path, "down"])
def test_create_success(self):
"""Test create success.
"""
username = get_random_username()
secret = self._identityService.create(self._adminEnrollment, username,
enrollmentSecret='pass')
self.assertTrue(secret == 'pass')
def test_getOne_success(self):
"""Test getOne success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.getOne(username, self._adminEnrollment)
self.assertTrue(res['result']['id'] == username)
self.assertTrue(res['success'] is True)
def test_getAll_success(self):
"""Test getAll success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.getAll(self._adminEnrollment)
self.assertTrue(len(res['result']['identities']) > 0)
self.assertTrue(res['success'] is True)
def test_delete_success(self):
"""Test delete success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.delete(username, self._adminEnrollment)
self.assertTrue(res['success'] is True)
def test_update_success(self):
"""Test update success.
"""
username = get_random_username()
self._identityService.create(self._adminEnrollment, username)
res = self._identityService.update(username, self._adminEnrollment,
maxEnrollments=3)
self.assertTrue(res['result']['id'] == username)
self.assertTrue(res['result']['max_enrollments'] == 3)
self.assertTrue(res['success'] is True)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.path.dirname",
"random.choice",
"time.sleep",
"hfc.fabric_ca.caservice.CAService",
"os.getenv",
"test.integration.utils.cli_call"
] |
[((4138, 4153), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4151, 4153), False, 'import unittest\n'), ((1294, 1314), 'os.getenv', 'os.getenv', (['"""CA_ADDR"""'], {}), "('CA_ADDR')\n", (1303, 1314), False, 'import os\n'), ((1678, 1724), 'hfc.fabric_ca.caservice.CAService', 'CAService', (["('http://' + self._ca_server_address)"], {}), "('http://' + self._ca_server_address)\n", (1687, 1724), False, 'from hfc.fabric_ca.caservice import CAService\n'), ((2034, 2104), 'test.integration.utils.cli_call', 'cli_call', (["['docker-compose', '-f', self.compose_file_path, 'up', '-d']"], {}), "(['docker-compose', '-f', self.compose_file_path, 'up', '-d'])\n", (2042, 2104), False, 'from test.integration.utils import cli_call\n'), ((2113, 2126), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2123, 2126), False, 'import time\n'), ((2169, 2235), 'test.integration.utils.cli_call', 'cli_call', (["['docker-compose', '-f', self.compose_file_path, 'down']"], {}), "(['docker-compose', '-f', self.compose_file_path, 'down'])\n", (2177, 2235), False, 'from test.integration.utils import cli_call\n'), ((778, 803), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (793, 803), False, 'import os\n'), ((1005, 1056), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (1018, 1056), False, 'import random\n'), ((1354, 1374), 'os.getenv', 'os.getenv', (['"""CA_ADDR"""'], {}), "('CA_ADDR')\n", (1363, 1374), False, 'import os\n'), ((1520, 1545), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1535, 1545), False, 'import os\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import cv2
import os
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
import copy
import random
import numpy as np
import seaborn as sns
import csv
from IPython.display import HTML
from PIL import Image
# In[2]:
def saveFrames(video_path, dest_path, video_id, all_frames=True):
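    """Sample roughly one random frame from each 30-frame window of the video
    and save the selected frames as JPEGs in dest_path.

    Note: the all_frames flag is accepted but not currently used.
    """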
cap = cv2.VideoCapture(video_path)
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
start_time = time.time()
selected_frames = []
for i in range(0, video_length, 30):
selected_frames.append(i + random.randint(0, 30))
count = 0
while cap.isOpened():
ret, frame = cap.read()
out_path = os.path.join(dest_path, video_id + "_%#05d.jpg" % (count + 1))
if count in selected_frames:
cv2.imwrite(out_path, frame)
count = count + 1
if (count > (video_length-1)):
end_time = time.time()
cap.release()
break
print("Generating test set")
test_dir = "insert_testset_directory_here"
dest_dir = "insert_output_directory_here"
d = 0
for dir_name in os.listdir(test_dir):
print("test")
data_path = os.path.join(test_dir, dir_name)
if os.path.isdir(data_path):
for video_name in os.listdir(data_path):
d += 1
video_id = video_name[:-4]
video_path = os.path.join(data_path, video_name)
dest_path = os.path.join(dest_dir, dir_name)
os.makedirs(dest_path, exist_ok=True)
saveFrames(video_path, dest_path, video_id, all_frames=False)
print(d)
# In[6]:
train_list = []
dest_dir = "insert_output_directory_here"
for dir_name in os.listdir(dest_dir):
data_path = os.path.join(dest_dir, dir_name)
if os.path.isdir(data_path):
for image_name in os.listdir(data_path):
im_id = image_name[:-4]
if dir_name == "dangerous":
train_list.append([im_id, "dangerous"])
else:
train_list.append([im_id, "non-dangerous"])
# In[7]:
random.shuffle(train_list)
train_list.insert(0, ["id", "category"])
# In[8]:
train_path = "dir/train.csv"
with open(train_path, "w") as f:
wr = csv.writer(f, delimiter=",")
wr.writerows(train_list)
# In[9]:
test_list = []
dest_dir = "insert_test_directory_here"
for dir_name in os.listdir(dest_dir):
data_path = os.path.join(dest_dir, dir_name)
if os.path.isdir(data_path):
for image_name in os.listdir(data_path):
im_id = image_name[:-4]
if dir_name == "dangerous":
test_list.append([im_id, "dangerous"])
else:
test_list.append([im_id, "non-dangerous"])
# In[10]:
random.shuffle(test_list)
test_list.insert(0, ["id", "category"])
# In[11]:
test_path = "dir/test-gt.csv"
with open(test_path, "w") as f:
wr = csv.writer(f, delimiter=",")
wr.writerows(test_list)
# In[12]:
l1 = []
dest_dir = "insert_train_directory_here"
for image_name in os.listdir(dest_dir):
im_id = image_name[:-10]
l1.append(im_id)
myset = set(l1)
print(len(myset))
# In[13]:
l1 = []
dest_dir = "insert_test_directory_here"
for image_name in os.listdir(dest_dir):
im_id = image_name[:-10]
l1.append(im_id)
myset = set(l1)
print(len(myset))
# In[ ]:
|
[
"csv.writer",
"os.makedirs",
"random.randint",
"os.path.isdir",
"random.shuffle",
"cv2.imwrite",
"time.time",
"cv2.VideoCapture",
"os.path.join",
"os.listdir"
] |
[((1130, 1150), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (1140, 1150), False, 'import os\n'), ((1709, 1729), 'os.listdir', 'os.listdir', (['dest_dir'], {}), '(dest_dir)\n', (1719, 1729), False, 'import os\n'), ((2085, 2111), 'random.shuffle', 'random.shuffle', (['train_list'], {}), '(train_list)\n', (2099, 2111), False, 'import random\n'), ((2379, 2399), 'os.listdir', 'os.listdir', (['dest_dir'], {}), '(dest_dir)\n', (2389, 2399), False, 'import os\n'), ((2754, 2779), 'random.shuffle', 'random.shuffle', (['test_list'], {}), '(test_list)\n', (2768, 2779), False, 'import random\n'), ((3043, 3063), 'os.listdir', 'os.listdir', (['dest_dir'], {}), '(dest_dir)\n', (3053, 3063), False, 'import os\n'), ((3230, 3250), 'os.listdir', 'os.listdir', (['dest_dir'], {}), '(dest_dir)\n', (3240, 3250), False, 'import os\n'), ((373, 401), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (389, 401), False, 'import cv2\n'), ((481, 492), 'time.time', 'time.time', ([], {}), '()\n', (490, 492), False, 'import time\n'), ((1186, 1218), 'os.path.join', 'os.path.join', (['test_dir', 'dir_name'], {}), '(test_dir, dir_name)\n', (1198, 1218), False, 'import os\n'), ((1226, 1250), 'os.path.isdir', 'os.path.isdir', (['data_path'], {}), '(data_path)\n', (1239, 1250), False, 'import os\n'), ((1747, 1779), 'os.path.join', 'os.path.join', (['dest_dir', 'dir_name'], {}), '(dest_dir, dir_name)\n', (1759, 1779), False, 'import os\n'), ((1787, 1811), 'os.path.isdir', 'os.path.isdir', (['data_path'], {}), '(data_path)\n', (1800, 1811), False, 'import os\n'), ((2237, 2265), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2247, 2265), False, 'import csv\n'), ((2417, 2449), 'os.path.join', 'os.path.join', (['dest_dir', 'dir_name'], {}), '(dest_dir, dir_name)\n', (2429, 2449), False, 'import os\n'), ((2457, 2481), 'os.path.isdir', 'os.path.isdir', (['data_path'], {}), '(data_path)\n', (2470, 2481), False, 'import os\n'), ((2905, 2933), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2915, 2933), False, 'import csv\n'), ((708, 770), 'os.path.join', 'os.path.join', (['dest_path', "(video_id + '_%#05d.jpg' % (count + 1))"], {}), "(dest_path, video_id + '_%#05d.jpg' % (count + 1))\n", (720, 770), False, 'import os\n'), ((1278, 1299), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1288, 1299), False, 'import os\n'), ((1839, 1860), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1849, 1860), False, 'import os\n'), ((2509, 2530), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2519, 2530), False, 'import os\n'), ((820, 848), 'cv2.imwrite', 'cv2.imwrite', (['out_path', 'frame'], {}), '(out_path, frame)\n', (831, 848), False, 'import cv2\n'), ((937, 948), 'time.time', 'time.time', ([], {}), '()\n', (946, 948), False, 'import time\n'), ((1384, 1419), 'os.path.join', 'os.path.join', (['data_path', 'video_name'], {}), '(data_path, video_name)\n', (1396, 1419), False, 'import os\n'), ((1444, 1476), 'os.path.join', 'os.path.join', (['dest_dir', 'dir_name'], {}), '(dest_dir, dir_name)\n', (1456, 1476), False, 'import os\n'), ((1489, 1526), 'os.makedirs', 'os.makedirs', (['dest_path'], {'exist_ok': '(True)'}), '(dest_path, exist_ok=True)\n', (1500, 1526), False, 'import os\n'), ((594, 615), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (608, 615), False, 'import random\n')]
|
from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice
from unittest import TestCase, mock
class TestMenu(TestCase):
def test_menu(self):
menu(institutes=[], orgs=[], search_term="test")
def test_input_int(self):
with mock.patch('builtins.input', lambda _: '1'):
input_int(max_=3)
def test_profile_choice(self):
profiles = [{'profile_id': 'internet'}]
profile_choice(profiles=profiles)
def test_search(self):
search(institutes=[], orgs=[], search_term="test")
def test_provider_choice(self):
base_uri = 'bla'
institutes = [{'display_name': 'test', 'base_uri': base_uri}]
with mock.patch('builtins.input', lambda _: '0'):
choice = provider_choice(institutes=institutes, orgs=[])
self.assertEqual(base_uri, choice)
def test_write_to_nm_choice(self):
with mock.patch('builtins.input', lambda _: '1'):
write_to_nm_choice()
|
[
"eduvpn.menu.search",
"eduvpn.menu.provider_choice",
"eduvpn.menu.menu",
"eduvpn.menu.profile_choice",
"unittest.mock.patch",
"eduvpn.menu.input_int",
"eduvpn.menu.write_to_nm_choice"
] |
[((198, 246), 'eduvpn.menu.menu', 'menu', ([], {'institutes': '[]', 'orgs': '[]', 'search_term': '"""test"""'}), "(institutes=[], orgs=[], search_term='test')\n", (202, 246), False, 'from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice\n'), ((458, 491), 'eduvpn.menu.profile_choice', 'profile_choice', ([], {'profiles': 'profiles'}), '(profiles=profiles)\n', (472, 491), False, 'from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice\n'), ((528, 578), 'eduvpn.menu.search', 'search', ([], {'institutes': '[]', 'orgs': '[]', 'search_term': '"""test"""'}), "(institutes=[], orgs=[], search_term='test')\n", (534, 578), False, 'from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice\n'), ((291, 334), 'unittest.mock.patch', 'mock.patch', (['"""builtins.input"""', "(lambda _: '1')"], {}), "('builtins.input', lambda _: '1')\n", (301, 334), False, 'from unittest import TestCase, mock\n'), ((348, 365), 'eduvpn.menu.input_int', 'input_int', ([], {'max_': '(3)'}), '(max_=3)\n', (357, 365), False, 'from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice\n'), ((724, 767), 'unittest.mock.patch', 'mock.patch', (['"""builtins.input"""', "(lambda _: '0')"], {}), "('builtins.input', lambda _: '0')\n", (734, 767), False, 'from unittest import TestCase, mock\n'), ((790, 837), 'eduvpn.menu.provider_choice', 'provider_choice', ([], {'institutes': 'institutes', 'orgs': '[]'}), '(institutes=institutes, orgs=[])\n', (805, 837), False, 'from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice\n'), ((934, 977), 'unittest.mock.patch', 'mock.patch', (['"""builtins.input"""', "(lambda _: '1')"], {}), "('builtins.input', lambda _: '1')\n", (944, 977), False, 'from unittest import TestCase, mock\n'), ((991, 1011), 'eduvpn.menu.write_to_nm_choice', 'write_to_nm_choice', ([], {}), '()\n', (1009, 1011), False, 'from eduvpn.menu import menu, input_int, profile_choice, search, provider_choice, write_to_nm_choice\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-25 11:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0012_auto_20160704_1559'),
]
operations = [
migrations.AddField(
model_name='video',
name='priority',
field=models.PositiveIntegerField(blank=True, help_text='Site-wide sort order (higher numbers first)', null=True),
),
]
|
[
"django.db.models.PositiveIntegerField"
] |
[((399, 511), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'help_text': '"""Site-wide sort order (higher numbers first)"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Site-wide sort order (higher numbers first)', null=True)\n", (426, 511), False, 'from django.db import migrations, models\n')]
|
from connect.eaas.extension import ProcessingResponse
from connect.toolkit.requests import RequestBuilder
from caas_ext.connect.sources import ConnectOrderSource
from caas_ext.flows import Flow
from cats_as_a_service.orders.domain.services import order_builder
class PurchaseFlow(Flow):
def process(self, request: RequestBuilder) -> ProcessingResponse:
self.logger.info(f"Processing purchase request with id {request.id()}")
# request decomposition (with parameter validation)
order = order_builder(ConnectOrderSource(request))
print(order)
return ProcessingResponse.done()
|
[
"caas_ext.connect.sources.ConnectOrderSource",
"connect.eaas.extension.ProcessingResponse.done"
] |
[((598, 623), 'connect.eaas.extension.ProcessingResponse.done', 'ProcessingResponse.done', ([], {}), '()\n', (621, 623), False, 'from connect.eaas.extension import ProcessingResponse\n'), ((531, 558), 'caas_ext.connect.sources.ConnectOrderSource', 'ConnectOrderSource', (['request'], {}), '(request)\n', (549, 558), False, 'from caas_ext.connect.sources import ConnectOrderSource\n')]
|
from pyModbusTCP.client import ModbusClient
import time
def modbus(ip):
dip = "Offline"
# Initiate modbus client
c = ModbusClient(host=ip, port=502, auto_open=True, timeout=1)
# Read Modbus outputs
reg = c.read_holding_registers(0, 100)
# Close Modbus connection
c.close()
if reg:
# Register 49 (array position 48) is dip switch settings
        # Convert the register value to an 8-bit binary string and reverse the bit order
dip = f'{reg[48]:08b}'[::-1]
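        # Illustration with a hypothetical register value: reg[48] == 0b10100001
        # formats to '10100001' and reverses to '10000101', so dip[0] holds the
        # least significant bit of the register and dip[7] the most significant.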
        # Convert to a list of single-bit characters
dip = [c for c in dip]
return(dip)
def mode(dip):
# Get MPPT mode
if dip[0] == '0':
mode = "Charge"
elif dip[0] == '1' and dip[6] == '0' and dip[7] == '0':
mode = "Load"
elif dip[0] == '1' and dip[6] == '1' and dip[7] == '0':
mode = "Diversion"
elif dip[0] == '1' and dip[6] == '0' and dip[7] == '1':
mode = "Lighting"
    else:
        mode = "Invalid"  # unrecognised switch combination; avoids an unbound variable
    return(mode)
def voltage(dip):
# Get voltage setting
if dip[1] == '0' and dip[2] == '0':
voltage = "Auto"
elif dip[1] == '0' and dip[2] == '1':
voltage = "12v"
elif dip[1] == '1' and dip[2] == '0':
voltage = "24v"
elif dip[1] == '1' and dip[2] == '1':
voltage = "48v"
return(voltage)
def setpoints(dip):
# Get set point values
if dip[3] == '0' and dip[4] == '0' and dip[5] == '0':
ab = "14.00 V"
fl = "13.4 V"
eq = "None"
elif dip[3] == '0' and dip[4] == '0' and dip[5] == '1':
ab = "14.15 V"
fl = "13.4 V"
eq = "14.2 V"
elif dip[3] == '0' and dip[4] == '1' and dip[5] == '0':
ab = "14.15 V"
fl = "13.4 V"
eq = "14.4 V"
elif dip[3] == '0' and dip[4] == '1' and dip[5] == '1':
ab = "14.35 V"
fl = "13.4 V"
eq = "15.1 V"
elif dip[3] == '1' and dip[4] == '0' and dip[5] == '0':
ab = "14.40 V"
fl = "13.4 V"
eq = "15.3 V"
elif dip[3] == '1' and dip[4] == '0' and dip[5] == '1':
ab = "14.80 V"
fl = "13.4 V"
eq = "15.3 V"
elif dip[3] == '1' and dip[4] == '1' and dip[5] == '0':
ab = "15.00 V"
fl = "13.4 V"
eq = "15.3 V"
else:
ab = "Invalid"
fl = "Invalid"
eq = "Invalid"
return(ab,fl,eq)
def equalize(dip):
# Get equalize settings
if dip[6] == '0':
eq_mode = "Manual"
elif dip[6] == "1":
eq_mode = "Automatic"
return(eq_mode)
def noise(dip):
# Noise reduction setting
if dip[7] == '0':
nrm = "PWM"
elif dip[7] == "1":
nrm = "On-Off Charging"
return(nrm)
def main():
with open("export.csv", 'r') as f:
with open("dip_sticks.csv", 'w') as o:
# .csv heading
o.write('Asset,IP,DIP 1,DIP 2,DIP 3,DIP 4,DIP 5,DIP 6,DIP 7,DIP 8,Mode,Voltage,Set Point - Absorb,Set Point - Float,Set Point - Equalize,Equalize,Noise Reduction\n')
# Each line in input .csv
for idx, line in enumerate(f):
# Skip headings
if idx == 0:
continue
# Get ip and name of MPPTs
line = line.split(',')
name = line[2]
ip = line[4]
ip = ip.replace('"', '')
# Get dip switch positions through modbus function
dip = modbus(ip)
# Convert dip switch positions to actual settings
if dip != 'Offline':
_mode = mode(dip)
_voltage = voltage(dip)
_setpoints = setpoints(dip)
_equalize = equalize(dip)
_noise = noise(dip)
print(f'{name} - {dip}')
# Write to output .csv
o.write(f'{name},'
f'{ip},'
f'{dip[0]},{dip[1]},{dip[2]},{dip[3]},{dip[4]},{dip[5]},{dip[6]},{dip[7]},'
f'{_mode},'
f'{_voltage},'
f'{_setpoints[0]},'
f'{_setpoints[1]},'
f'{_setpoints[2]},'
f'{_equalize},'
f'{_noise}\n')
elif dip == "Offline":
o.write(f'{name},{ip},Offline\n')
o.close()
f.close()
if __name__ == "__main__":
main()
|
[
"pyModbusTCP.client.ModbusClient"
] |
[((134, 192), 'pyModbusTCP.client.ModbusClient', 'ModbusClient', ([], {'host': 'ip', 'port': '(502)', 'auto_open': '(True)', 'timeout': '(1)'}), '(host=ip, port=502, auto_open=True, timeout=1)\n', (146, 192), False, 'from pyModbusTCP.client import ModbusClient\n')]
|
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.contrib.auth.decorators import login_required
from django.conf.urls import url
from .views import feedbackcreate, thankyou, FeedbackListView
from .models import Feedback
urlpatterns = [
url(r'^create/?$', feedbackcreate, name='feedback-create'),
url(r'^view?$', FeedbackListView.as_view(template_name='feedback_list.html',
queryset=Feedback.objects.all()), name='feedback-list'),
url(r'^thankyou/?$', thankyou, name='thanks'),
]
|
[
"django.conf.urls.url"
] |
[((435, 492), 'django.conf.urls.url', 'url', (['"""^create/?$"""', 'feedbackcreate'], {'name': '"""feedback-create"""'}), "('^create/?$', feedbackcreate, name='feedback-create')\n", (438, 492), False, 'from django.conf.urls import url\n'), ((649, 693), 'django.conf.urls.url', 'url', (['"""^thankyou/?$"""', 'thankyou'], {'name': '"""thanks"""'}), "('^thankyou/?$', thankyou, name='thanks')\n", (652, 693), False, 'from django.conf.urls import url\n')]
|
import numpy as np
from scipy import interpolate
def clean_ibi(events, samping_rate, n=2):
ibi = _ibi(events, samping_rate)
for _ in range(n):
        # detect outliers and replace with NaN
outliers = signal_outliers(ibi, samping_rate)
time = np.cumsum(ibi)
# interpolate nan
f = interpolate.interp1d(
time[~outliers], ibi[~outliers], "cubic", fill_value="extrapolate"
)
ibi = f(time) # update
return ibi
def _ibi(events, samping_rate):
"""Inter beat interval at msec scale."""
return np.diff(events) / samping_rate * 1000
def signal_outliers(signal, samping_rate):
"""Number of outliers in Inter beat intervals."""
return _rolling_mad(signal, int(0.5 * samping_rate))
def _mad(arr):
"""Median Absolute Deviation."""
med = np.median(arr)
return med, np.median(np.abs(arr - med))
def _rolling_mad(arr, window):
"""Rolling window MAD outlier detection on 1d array."""
outliers = []
for i in range(window, len(arr)):
cur = arr[(i - window) : i]
med, cur_mad = _mad(cur)
cur_out = cur > (med + cur_mad * 3)
idx = list(np.arange((i - window), i)[cur_out])
outliers += idx
outliers = list(set(outliers))
# turn index into boolean
bool_outliers = np.zeros(arr.shape[0], dtype=bool)
bool_outliers[outliers] = True
return bool_outliers
|
[
"numpy.abs",
"numpy.median",
"numpy.zeros",
"numpy.cumsum",
"numpy.diff",
"numpy.arange",
"scipy.interpolate.interp1d"
] |
[((827, 841), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (836, 841), True, 'import numpy as np\n'), ((1315, 1349), 'numpy.zeros', 'np.zeros', (['arr.shape[0]'], {'dtype': 'bool'}), '(arr.shape[0], dtype=bool)\n', (1323, 1349), True, 'import numpy as np\n'), ((268, 282), 'numpy.cumsum', 'np.cumsum', (['ibi'], {}), '(ibi)\n', (277, 282), True, 'import numpy as np\n'), ((321, 414), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['time[~outliers]', 'ibi[~outliers]', '"""cubic"""'], {'fill_value': '"""extrapolate"""'}), "(time[~outliers], ibi[~outliers], 'cubic', fill_value=\n 'extrapolate')\n", (341, 414), False, 'from scipy import interpolate\n'), ((569, 584), 'numpy.diff', 'np.diff', (['events'], {}), '(events)\n', (576, 584), True, 'import numpy as np\n'), ((868, 885), 'numpy.abs', 'np.abs', (['(arr - med)'], {}), '(arr - med)\n', (874, 885), True, 'import numpy as np\n'), ((1168, 1192), 'numpy.arange', 'np.arange', (['(i - window)', 'i'], {}), '(i - window, i)\n', (1177, 1192), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def f(x):
decay = 10.0
period = decay
return (3.0+0.5*np.sin(2.*np.pi*x/period))*x*np.exp(-x/decay)
# read data from file
xdata, ydata, yerror = np.loadtxt('DecayOcsData.txt', skiprows=5, unpack=True)
# create theoretical fitting curve
x = np.linspace(0, 45, 128)
y = f(x)
# create plot
plt.figure(1, figsize = (7,4.5) )
plt.plot(x, y, 'b-', label="theory")
plt.errorbar(xdata, ydata, fmt='ro', label="data",
xerr=0.75, yerr=yerror, ecolor='k')
plt.xlabel('x')
plt.ylabel('transverse displacement')
plt.legend(loc='upper right')
# save plot to file
plt.savefig('DecayOcsData.pdf')
# display plot on screen
plt.show()
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.loadtxt",
"numpy.linspace",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.errorbar"
] |
[((210, 265), 'numpy.loadtxt', 'np.loadtxt', (['"""DecayOcsData.txt"""'], {'skiprows': '(5)', 'unpack': '(True)'}), "('DecayOcsData.txt', skiprows=5, unpack=True)\n", (220, 265), True, 'import numpy as np\n'), ((306, 329), 'numpy.linspace', 'np.linspace', (['(0)', '(45)', '(128)'], {}), '(0, 45, 128)\n', (317, 329), True, 'import numpy as np\n'), ((354, 385), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(7, 4.5)'}), '(1, figsize=(7, 4.5))\n', (364, 385), True, 'import matplotlib.pyplot as plt\n'), ((388, 424), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b-"""'], {'label': '"""theory"""'}), "(x, y, 'b-', label='theory')\n", (396, 424), True, 'import matplotlib.pyplot as plt\n'), ((425, 515), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['xdata', 'ydata'], {'fmt': '"""ro"""', 'label': '"""data"""', 'xerr': '(0.75)', 'yerr': 'yerror', 'ecolor': '"""k"""'}), "(xdata, ydata, fmt='ro', label='data', xerr=0.75, yerr=yerror,\n ecolor='k')\n", (437, 515), True, 'import matplotlib.pyplot as plt\n'), ((526, 541), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (536, 541), True, 'import matplotlib.pyplot as plt\n'), ((542, 579), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""transverse displacement"""'], {}), "('transverse displacement')\n", (552, 579), True, 'import matplotlib.pyplot as plt\n'), ((580, 609), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (590, 609), True, 'import matplotlib.pyplot as plt\n'), ((631, 662), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""DecayOcsData.pdf"""'], {}), "('DecayOcsData.pdf')\n", (642, 662), True, 'import matplotlib.pyplot as plt\n'), ((689, 699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (697, 699), True, 'import matplotlib.pyplot as plt\n'), ((147, 165), 'numpy.exp', 'np.exp', (['(-x / decay)'], {}), '(-x / decay)\n', (153, 165), True, 'import numpy as np\n'), ((118, 150), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * x / period)'], {}), '(2.0 * np.pi * x / period)\n', (124, 150), True, 'import numpy as np\n')]
|
from django.db import models
from django import forms
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ModelFormAdminView, DetailAdminView, ListAdminView
def get_gallery_modal():
return """
<!-- modal-gallery is the modal dialog used for the image gallery -->
<div id="modal-gallery" class="modal modal-gallery fade" tabindex="-1">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
<h4 class="modal-title"></h4>
</div>
<div class="modal-body"><div class="modal-image"><h1 class="loader"><i class="icon-spinner icon-spin icon-large loader"></i></h1></div></div>
<div class="modal-footer">
<a class="btn btn-info modal-prev"><i class="icon-arrow-left icon-white"></i> <span>%s</span></a>
<a class="btn btn-primary modal-next"><span>%s</span> <i class="icon-arrow-right icon-white"></i></a>
<a class="btn btn-success modal-play modal-slideshow" data-slideshow="5000"><i class="icon-play icon-white"></i> <span>%s</span></a>
<a class="btn btn-default modal-download" target="_blank"><i class="icon-download"></i> <span>%s</span></a>
</div>
</div><!-- /.modal-content -->
</div><!-- /.modal-dialog -->
</div>
""" % (_('Previous'), _('Next'), _('Slideshow'), _('Download'))
class AdminImageField(forms.ImageField):
def widget_attrs(self, widget):
return {'label': self.label}
class AdminImageWidget(forms.FileInput):
"""
A ImageField Widget that shows its current value if it has one.
"""
def __init__(self, attrs={}):
super(AdminImageWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, "url"):
label = self.attrs.get('label', name)
output.append('<a href="%s" target="_blank" title="%s" data-gallery="gallery"><img src="%s" class="field_img"/></a><br/>%s ' %
(value.url, label, value.url, _('Change:')))
output.append(super(AdminImageWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class ModelDetailPlugin(BaseAdminPlugin):
def __init__(self, admin_view):
super(ModelDetailPlugin, self).__init__(admin_view)
self.include_image = False
def get_field_attrs(self, attrs, db_field, **kwargs):
if isinstance(db_field, models.ImageField):
attrs['widget'] = AdminImageWidget
attrs['form_class'] = AdminImageField
self.include_image = True
return attrs
def get_field_result(self, result, field_name):
if isinstance(result.field, models.ImageField):
if result.value:
img = getattr(result.obj, field_name)
result.text = mark_safe('<a href="%s" target="_blank" title="%s" data-gallery="gallery"><img src="%s" class="field_img"/></a>' % (img.url, result.label, img.url))
self.include_image = True
return result
# Media
def get_media(self, media):
if self.include_image:
media = media + self.vendor('image-gallery.js',
'image-gallery.css')
return media
def block_before_fieldsets(self, context, node):
if self.include_image:
return '<div id="gallery" data-toggle="modal-gallery" data-target="#modal-gallery">'
def block_after_fieldsets(self, context, node):
if self.include_image:
return "</div>"
def block_extrabody(self, context, node):
if self.include_image:
return get_gallery_modal()
class ModelListPlugin(BaseAdminPlugin):
list_gallery = False
def init_request(self, *args, **kwargs):
return bool(self.list_gallery)
# Media
def get_media(self, media):
return media + self.vendor('image-gallery.js', 'image-gallery.css')
def block_results_top(self, context, node):
return '<div id="gallery" data-toggle="modal-gallery" data-target="#modal-gallery">'
def block_results_bottom(self, context, node):
return "</div>"
def block_extrabody(self, context, node):
return get_gallery_modal()
site.register_plugin(ModelDetailPlugin, DetailAdminView)
site.register_plugin(ModelDetailPlugin, ModelFormAdminView)
site.register_plugin(ModelListPlugin, ListAdminView)
|
[
"django.utils.translation.ugettext",
"django.utils.safestring.mark_safe",
"xadmin.sites.site.register_plugin"
] |
[((4573, 4629), 'xadmin.sites.site.register_plugin', 'site.register_plugin', (['ModelDetailPlugin', 'DetailAdminView'], {}), '(ModelDetailPlugin, DetailAdminView)\n', (4593, 4629), False, 'from xadmin.sites import site\n'), ((4630, 4689), 'xadmin.sites.site.register_plugin', 'site.register_plugin', (['ModelDetailPlugin', 'ModelFormAdminView'], {}), '(ModelDetailPlugin, ModelFormAdminView)\n', (4650, 4689), False, 'from xadmin.sites import site\n'), ((4690, 4742), 'xadmin.sites.site.register_plugin', 'site.register_plugin', (['ModelListPlugin', 'ListAdminView'], {}), '(ModelListPlugin, ListAdminView)\n', (4710, 4742), False, 'from xadmin.sites import site\n'), ((1608, 1621), 'django.utils.translation.ugettext', '_', (['"""Previous"""'], {}), "('Previous')\n", (1609, 1621), True, 'from django.utils.translation import ugettext as _\n'), ((1623, 1632), 'django.utils.translation.ugettext', '_', (['"""Next"""'], {}), "('Next')\n", (1624, 1632), True, 'from django.utils.translation import ugettext as _\n'), ((1634, 1648), 'django.utils.translation.ugettext', '_', (['"""Slideshow"""'], {}), "('Slideshow')\n", (1635, 1648), True, 'from django.utils.translation import ugettext as _\n'), ((1650, 1663), 'django.utils.translation.ugettext', '_', (['"""Download"""'], {}), "('Download')\n", (1651, 1663), True, 'from django.utils.translation import ugettext as _\n'), ((3155, 3313), 'django.utils.safestring.mark_safe', 'mark_safe', (['(\'<a href="%s" target="_blank" title="%s" data-gallery="gallery"><img src="%s" class="field_img"/></a>\'\n % (img.url, result.label, img.url))'], {}), '(\n \'<a href="%s" target="_blank" title="%s" data-gallery="gallery"><img src="%s" class="field_img"/></a>\'\n % (img.url, result.label, img.url))\n', (3164, 3313), False, 'from django.utils.safestring import mark_safe\n'), ((2352, 2364), 'django.utils.translation.ugettext', '_', (['"""Change:"""'], {}), "('Change:')\n", (2353, 2364), True, 'from django.utils.translation import ugettext as _\n')]
|
# Remove "http://sn" login_url
import logging
# logger.basicConfig(filename=f"{__name__}.log")
logger = logging.getLogger(__name__)
sHandler = logging.StreamHandler()
sHandler.setLevel(logging.INFO)
logger.addHandler(sHandler)
logfilenode = __file__.rsplit('.', 1)[0]  # strip only the file extension
handler = logging.FileHandler(f"{logfilenode}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
import sys
import os
import ycommander as kc
from ycommander import api, params
if __name__ == '__main__':
try:
user = sys.argv[1]
except IndexError:
try:
user = os.environ['user']
except KeyError:
user = input("User:")
try:
password = sys.argv[2]
except IndexError:
try:
password = os.environ['password']
except KeyError:
from getpass import getpass
password = getpass('Password:')
# config = {'user': user, 'password': password}
params = params.KeeperParams() # config=config)
params.user = user
params.password = password
session_token = api.login(params)
TABLE_NAME = 'sn_url'
sn_url = 'http://sn'
params.sync_data = True # to update
MAX_REPEAT = 999
logger.setLevel(logging.INFO)
for repeat, uid in enumerate(params.record_cache):
if repeat >= MAX_REPEAT:
logger.info(f"Exitting because of over repeat limit {repeat}")
break
rec = api.get_record(params, uid)
if rec.login_url == sn_url:
rec.login_url = '' # set string empty
api.update_record(params, rec)
logger.info(f"sn_url is erased at {uid} : {rec.title}")
exit(0) # to suppress warning of 'Exit without exit code'
|
[
"ycommander.api.update_record",
"logging.FileHandler",
"ycommander.api.login",
"ycommander.params.KeeperParams",
"getpass.getpass",
"logging.StreamHandler",
"logging.Formatter",
"ycommander.api.get_record",
"logging.getLogger"
] |
[((104, 131), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (121, 131), False, 'import logging\n'), ((143, 166), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (164, 166), False, 'import logging\n'), ((275, 316), 'logging.FileHandler', 'logging.FileHandler', (['f"""{logfilenode}.log"""'], {}), "(f'{logfilenode}.log')\n", (294, 316), False, 'import logging\n'), ((360, 433), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (377, 433), False, 'import logging\n'), ((1072, 1093), 'ycommander.params.KeeperParams', 'params.KeeperParams', ([], {}), '()\n', (1091, 1093), False, 'from ycommander import api, params\n'), ((1186, 1203), 'ycommander.api.login', 'api.login', (['params'], {}), '(params)\n', (1195, 1203), False, 'from ycommander import api, params\n'), ((1545, 1572), 'ycommander.api.get_record', 'api.get_record', (['params', 'uid'], {}), '(params, uid)\n', (1559, 1572), False, 'from ycommander import api, params\n'), ((1671, 1701), 'ycommander.api.update_record', 'api.update_record', (['params', 'rec'], {}), '(params, rec)\n', (1688, 1701), False, 'from ycommander import api, params\n'), ((986, 1006), 'getpass.getpass', 'getpass', (['"""Password:"""'], {}), "('Password:')\n", (993, 1006), False, 'from getpass import getpass\n')]
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_science_job
short_description: Manage a Job resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Job resource in Oracle Cloud Infrastructure
- For I(state=present), creates a job.
- "This resource has the following action operations in the M(oracle.oci.oci_data_science_job_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
project_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project to associate the job with.
- Required for create using I(state=present).
type: str
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where you want to create the job.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
display_name:
description:
- A user-friendly display name for the resource.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
description:
description:
- A short description of the job.
- This parameter is updatable.
type: str
job_configuration_details:
description:
- ""
- Required for create using I(state=present).
type: dict
suboptions:
job_type:
description:
- The type of job.
type: str
choices:
- "DEFAULT"
required: true
environment_variables:
description:
- Environment variables to set for the job.
type: dict
command_line_arguments:
description:
- The arguments to pass to the job.
type: str
maximum_runtime_in_minutes:
description:
- A time bound for the execution of the job. Timer starts when the job becomes active.
type: int
job_infrastructure_configuration_details:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
job_infrastructure_type:
description:
- The infrastructure type used for job run.
type: str
choices:
- "STANDALONE"
required: true
shape_name:
description:
- The shape used to launch the job run instances.
type: str
required: true
subnet_id:
description:
- The subnet to create a secondary vnic in to attach to the instance running the job
type: str
required: true
block_storage_size_in_gbs:
description:
- The size of the block storage volume to attach to the instance running the job
type: int
required: true
job_log_configuration_details:
description:
- ""
type: dict
suboptions:
enable_logging:
description:
- If customer logging is enabled for job runs.
type: bool
enable_auto_log_creation:
description:
- If automatic on-behalf-of log object creation is enabled for job runs.
type: bool
log_group_id:
description:
- The log group id for where log objects are for job runs.
type: str
log_id:
description:
                    - The log id the job run will push logs to.
type: str
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
job_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the job.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
delete_related_job_runs:
description:
- Delete all JobRuns associated with this job.
type: bool
state:
description:
- The state of the Job.
- Use I(state=present) to create or update a Job.
- Use I(state=absent) to delete a Job.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create job
oci_data_science_job:
# required
project_id: "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
job_configuration_details:
# required
job_type: DEFAULT
# optional
environment_variables: null
command_line_arguments: command_line_arguments_example
maximum_runtime_in_minutes: 56
job_infrastructure_configuration_details:
# required
job_infrastructure_type: STANDALONE
shape_name: shape_name_example
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
block_storage_size_in_gbs: 1024
# optional
display_name: display_name_example
description: description_example
job_log_configuration_details:
# optional
enable_logging: true
enable_auto_log_creation: true
log_group_id: "ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx"
log_id: "ocid1.log.oc1..xxxxxxEXAMPLExxxxxx"
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update job
oci_data_science_job:
# required
job_id: "ocid1.job.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
description: description_example
job_infrastructure_configuration_details:
# required
job_infrastructure_type: STANDALONE
shape_name: shape_name_example
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
block_storage_size_in_gbs: 1024
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update job using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_data_science_job:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
# optional
description: description_example
job_infrastructure_configuration_details:
# required
job_infrastructure_type: STANDALONE
shape_name: shape_name_example
subnet_id: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
block_storage_size_in_gbs: 1024
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete job
oci_data_science_job:
# required
job_id: "ocid1.job.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
# optional
delete_related_job_runs: true
- name: Delete job using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_data_science_job:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
state: absent
"""
RETURN = """
job:
description:
- Details of the Job resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the job.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- "The date and time the resource was created in the timestamp format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: 2020-08-06T21:10:29.41Z"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
created_by:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the user who created the job.
returned: on success
type: str
sample: created_by_example
project_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project to associate the job with.
returned: on success
type: str
sample: "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where you want to create the job.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly display name for the resource.
returned: on success
type: str
sample: display_name_example
description:
description:
- A short description of the job.
returned: on success
type: str
sample: description_example
job_configuration_details:
description:
- ""
returned: on success
type: complex
contains:
job_type:
description:
- The type of job.
returned: on success
type: str
sample: DEFAULT
environment_variables:
description:
- Environment variables to set for the job.
returned: on success
type: dict
sample: {}
command_line_arguments:
description:
- The arguments to pass to the job.
returned: on success
type: str
sample: command_line_arguments_example
maximum_runtime_in_minutes:
description:
- A time bound for the execution of the job. Timer starts when the job becomes active.
returned: on success
type: int
sample: 56
job_infrastructure_configuration_details:
description:
- ""
returned: on success
type: complex
contains:
job_infrastructure_type:
description:
- The infrastructure type used for job run.
returned: on success
type: str
sample: STANDALONE
shape_name:
description:
- The shape used to launch the job run instances.
returned: on success
type: str
sample: shape_name_example
subnet_id:
description:
- The subnet to create a secondary vnic in to attach to the instance running the job
returned: on success
type: str
sample: "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx"
block_storage_size_in_gbs:
description:
- The size of the block storage volume to attach to the instance running the job
returned: on success
type: int
sample: 1024
job_log_configuration_details:
description:
- ""
returned: on success
type: complex
contains:
enable_logging:
description:
- If customer logging is enabled for job runs.
returned: on success
type: bool
sample: true
enable_auto_log_creation:
description:
- If automatic on-behalf-of log object creation is enabled for job runs.
returned: on success
type: bool
sample: true
log_group_id:
description:
- The log group id for where log objects are for job runs.
returned: on success
type: str
sample: "ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx"
log_id:
description:
                        - The log id the job run will push logs to.
returned: on success
type: str
sample: "ocid1.log.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The state of the job.
returned: on success
type: str
sample: ACTIVE
lifecycle_details:
description:
- The state of the job.
returned: on success
type: str
sample: lifecycle_details_example
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"created_by": "created_by_example",
"project_id": "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"job_configuration_details": {
"job_type": "DEFAULT",
"environment_variables": {},
"command_line_arguments": "command_line_arguments_example",
"maximum_runtime_in_minutes": 56
},
"job_infrastructure_configuration_details": {
"job_infrastructure_type": "STANDALONE",
"shape_name": "shape_name_example",
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"block_storage_size_in_gbs": 1024
},
"job_log_configuration_details": {
"enable_logging": true,
"enable_auto_log_creation": true,
"log_group_id": "ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx",
"log_id": "ocid1.log.oc1..xxxxxxEXAMPLExxxxxx"
},
"lifecycle_state": "ACTIVE",
"lifecycle_details": "lifecycle_details_example",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.data_science import DataScienceClient
from oci.data_science.models import CreateJobDetails
from oci.data_science.models import UpdateJobDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataScienceJobHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "job_id"
def get_module_resource_id(self):
return self.module.params.get("job_id")
def get_get_fn(self):
return self.client.get_job
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_job, job_id=self.module.params.get("job_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["project_id", "display_name"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(self.client.list_jobs, **kwargs)
def get_create_model_class(self):
return CreateJobDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_job,
call_fn_args=(),
call_fn_kwargs=dict(create_job_details=create_details,),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
def get_update_model_class(self):
return UpdateJobDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_job,
call_fn_args=(),
call_fn_kwargs=dict(
job_id=self.module.params.get("job_id"),
update_job_details=update_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.UPDATE_OPERATION_KEY,
),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_job,
call_fn_args=(),
call_fn_kwargs=dict(
job_id=self.module.params.get("job_id"),
delete_related_job_runs=self.module.params.get(
"delete_related_job_runs"
),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
DataScienceJobHelperCustom = get_custom_class("DataScienceJobHelperCustom")
class ResourceHelper(DataScienceJobHelperCustom, DataScienceJobHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
project_id=dict(type="str"),
compartment_id=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
description=dict(type="str"),
job_configuration_details=dict(
type="dict",
options=dict(
job_type=dict(type="str", required=True, choices=["DEFAULT"]),
environment_variables=dict(type="dict"),
command_line_arguments=dict(type="str"),
maximum_runtime_in_minutes=dict(type="int"),
),
),
job_infrastructure_configuration_details=dict(
type="dict",
options=dict(
job_infrastructure_type=dict(
type="str", required=True, choices=["STANDALONE"]
),
shape_name=dict(type="str", required=True),
subnet_id=dict(type="str", required=True),
block_storage_size_in_gbs=dict(type="int", required=True),
),
),
job_log_configuration_details=dict(
type="dict",
options=dict(
enable_logging=dict(type="bool"),
enable_auto_log_creation=dict(type="bool"),
log_group_id=dict(type="str"),
log_id=dict(type="str"),
),
),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
job_id=dict(aliases=["id"], type="str"),
delete_related_job_runs=dict(type="bool"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="job",
service_client_class=DataScienceClient,
namespace="data_science",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
[
"ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.list_all_resources",
"ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.get_common_arg_spec",
"ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.get_work_request_completed_states",
"ansible.module_utils.basic.AnsibleModule",
"ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils.get_custom_class",
"ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.merge_dicts"
] |
[((22276, 22322), 'ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils.get_custom_class', 'get_custom_class', (['"""DataScienceJobHelperCustom"""'], {}), "('DataScienceJobHelperCustom')\n", (22292, 22322), False, 'from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import OCIResourceHelperBase, get_custom_class\n'), ((22441, 22519), 'ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.get_common_arg_spec', 'oci_common_utils.get_common_arg_spec', ([], {'supports_create': '(True)', 'supports_wait': '(True)'}), '(supports_create=True, supports_wait=True)\n', (22477, 22519), False, 'from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils, oci_wait_utils\n'), ((24341, 24407), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'module_args', 'supports_check_mode': '(True)'}), '(argument_spec=module_args, supports_check_mode=True)\n', (24354, 24407), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((19922, 19984), 'ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.merge_dicts', 'oci_common_utils.merge_dicts', (['required_kwargs', 'optional_kwargs'], {}), '(required_kwargs, optional_kwargs)\n', (19950, 19984), False, 'from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils, oci_wait_utils\n'), ((20000, 20068), 'ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.list_all_resources', 'oci_common_utils.list_all_resources', (['self.client.list_jobs'], {}), '(self.client.list_jobs, **kwargs)\n', (20035, 20068), False, 'from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils, oci_wait_utils\n'), ((22181, 22233), 'ansible_collections.oracle.oci.plugins.module_utils.oci_common_utils.get_work_request_completed_states', 'oci_common_utils.get_work_request_completed_states', ([], {}), '()\n', (22231, 22233), False, 'from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils, oci_wait_utils\n')]
|
from random import randint
from pygame.draw import line
class Tile():
def __init__(self, height = None):
if height != None:
self.__height = height
else:
            self.__height = randint(0, 100)  # percentage of maximum height, to keep things simple
self.__region = None
self.__subRegion = None
self.__caracteristics = []
def get_height(self): return self.__height
def set_height(self, height): self.__height = height
def draw(self, surface, x, y, w, h, type = "heightMap"):
if type == "continent":
if self.__height < 10:
color = (10, 5, 71)
elif self.__height < 25:
color = (27, 21, 112)
elif self.__height < 35:
color = (60, 53, 158)
elif self.__height < 50:
color = (104, 97, 198)
elif self.__height < 65:
color = (252, 208, 146)
elif self.__height < 75:
color = (216, 185, 140)
elif self.__height < 85:
color = (170, 140, 99)
elif self.__height < 90:
color = (112, 55, 6)
else:
color = (102, 30, 0)
elif type == "region":
color = self.__region.getColor()
elif type == "subRegion":
color = self.__subRegion.getColor()
else:
grayscale = int((self.__height*255)/100)
color = (grayscale, grayscale, grayscale)
surface.fill(color, (x, y, w, h))
def drawCharacteristics(self, surface, x, y, w, h):
for chars in self.__caracteristics:
chars.draw(surface, x, y, w, h)
def regionalize(self, region):self.__region = region
def getRegion(self): return self.__region
def subRegionalize(self, subRegion):self.__subRegion = subRegion
def getSubRegion(self): return self.__subRegion
def __gt__(self, other):
h1 = self.__height
h2 = other.get_height()
if h1 > h2: return True
elif h1 < h2: return False
else: return randint(0,1)
def addCharacteristic(self, chars):
self.__caracteristics.append(chars)
|
[
"random.randint"
] |
[((181, 196), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (188, 196), False, 'from random import randint\n'), ((1760, 1773), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1767, 1773), False, 'from random import randint\n')]
|
#!/home/bryan/repo/CINS465-Fall2016-Lecture-Examples/coffee/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
def __init__(self, master, im, value=128):
Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
self.canvas.pack()
scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale, length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
# uncomment the following line for instant feedback (might
# be too slow on some platforms)
# self.redraw()
def update_scale(self, value):
self.value = float(value)
self.redraw()
def redraw(self, event=None):
# create overlay (note the explicit conversion to mode "1")
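        # pixels at or above the current threshold become the foreground of the
        # "1"-mode bitmap, which is drawn in green on top of the backdrop below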
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
# update canvas
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
tags="overlay")
# --------------------------------------------------------------------
# main
if len(sys.argv) != 2:
print("Usage: thresholder file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
|
[
"PIL.ImageTk.BitmapImage",
"PIL.ImageTk.PhotoImage",
"sys.exit",
"PIL.Image.open"
] |
[((1791, 1814), 'PIL.Image.open', 'Image.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1801, 1814), False, 'from PIL import Image, ImageTk\n'), ((1760, 1771), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1768, 1771), False, 'import sys\n'), ((610, 632), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['im'], {}), '(im)\n', (628, 632), False, 'from PIL import Image, ImageTk\n'), ((1390, 1433), 'PIL.ImageTk.BitmapImage', 'ImageTk.BitmapImage', (['im'], {'foreground': '"""green"""'}), "(im, foreground='green')\n", (1409, 1433), False, 'from PIL import Image, ImageTk\n')]
|
import logging
import os.path
import random
from string import ascii_letters, digits
import requests
from flask import render_template, request, redirect, url_for
from config import config
from app import app, fetch, utils
@app.route('/')
def index():
N = 20
state = ''.join(random.SystemRandom().choice(ascii_letters + digits) for _ in range(N))
# TODO: Set state string in SESSION variable
return render_template('index.html',
client_id=config.GITHUB_CLIENT_ID,
auth_url='http://127.0.0.1:5000/auth',
scope='read:org',
state=state)
@app.route('/auth')
def auth():
if 'code' in request.args:
code = request.args['code']
state = request.args['state']
print('CODE', code)
print('STATE', state) # TODO: Compare state to stored SESSION variable
response = fetch.post_temporary_code(code, state)
if response.status_code == requests.codes.ok:
utils.persist_access_token(response.json())
return redirect(url_for('success'))
else:
return redirect(url_for('failure'))
else:
return 'Invalid request'
@app.route('/success')
def success():
return render_template('success.html')
@app.route('/failure')
def failure():
return render_template('failure.html')
@app.route('/streaks')
def streaks():
response = fetch.orgs_for_user()
if response.status_code == requests.codes.ok:
orgs = utils.parse_orgs(response.json())
for org in orgs:
# TODO: Pagination
response = fetch.members_in_org(org['login'])
if response.status_code == requests.codes.ok:
members = utils.parse_org_members(response.json())
org['count'] = len(members)
print('Found {} members'.format(org['count']))
for member in members[-5:]:
response = fetch.member_page(member['login'])
if response.status_code == requests.codes.ok:
print('member account: ')
print(response.json())
response = fetch.member_contributions(member['login'])
if response.status_code == requests.codes.ok:
print('member contributions: ')
print(response.content)
else:
print(response.content)
orgs = [
{'login': 'rat', 'count': 1},
{'login': 'mat', 'count': 2},
{'login': 'hat', 'count': 3},
{'login': 'cat', 'count': 5},
]
return render_template('streaks.html',
orgs=orgs)
|
[
"app.fetch.post_temporary_code",
"app.app.route",
"app.fetch.member_contributions",
"random.SystemRandom",
"app.fetch.orgs_for_user",
"flask.url_for",
"flask.render_template",
"app.fetch.members_in_org",
"app.fetch.member_page"
] |
[((229, 243), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (238, 243), False, 'from app import app, fetch, utils\n'), ((665, 683), 'app.app.route', 'app.route', (['"""/auth"""'], {}), "('/auth')\n", (674, 683), False, 'from app import app, fetch, utils\n'), ((1237, 1258), 'app.app.route', 'app.route', (['"""/success"""'], {}), "('/success')\n", (1246, 1258), False, 'from app import app, fetch, utils\n'), ((1320, 1341), 'app.app.route', 'app.route', (['"""/failure"""'], {}), "('/failure')\n", (1329, 1341), False, 'from app import app, fetch, utils\n'), ((1403, 1424), 'app.app.route', 'app.route', (['"""/streaks"""'], {}), "('/streaks')\n", (1412, 1424), False, 'from app import app, fetch, utils\n'), ((420, 559), 'flask.render_template', 'render_template', (['"""index.html"""'], {'client_id': 'config.GITHUB_CLIENT_ID', 'auth_url': '"""http://127.0.0.1:5000/auth"""', 'scope': '"""read:org"""', 'state': 'state'}), "('index.html', client_id=config.GITHUB_CLIENT_ID, auth_url=\n 'http://127.0.0.1:5000/auth', scope='read:org', state=state)\n", (435, 559), False, 'from flask import render_template, request, redirect, url_for\n'), ((1285, 1316), 'flask.render_template', 'render_template', (['"""success.html"""'], {}), "('success.html')\n", (1300, 1316), False, 'from flask import render_template, request, redirect, url_for\n'), ((1368, 1399), 'flask.render_template', 'render_template', (['"""failure.html"""'], {}), "('failure.html')\n", (1383, 1399), False, 'from flask import render_template, request, redirect, url_for\n'), ((1455, 1476), 'app.fetch.orgs_for_user', 'fetch.orgs_for_user', ([], {}), '()\n', (1474, 1476), False, 'from app import app, fetch, utils\n'), ((2705, 2747), 'flask.render_template', 'render_template', (['"""streaks.html"""'], {'orgs': 'orgs'}), "('streaks.html', orgs=orgs)\n", (2720, 2747), False, 'from flask import render_template, request, redirect, url_for\n'), ((929, 967), 'app.fetch.post_temporary_code', 'fetch.post_temporary_code', (['code', 'state'], {}), '(code, state)\n', (954, 967), False, 'from app import app, fetch, utils\n'), ((1655, 1689), 'app.fetch.members_in_org', 'fetch.members_in_org', (["org['login']"], {}), "(org['login'])\n", (1675, 1689), False, 'from app import app, fetch, utils\n'), ((1108, 1126), 'flask.url_for', 'url_for', (['"""success"""'], {}), "('success')\n", (1115, 1126), False, 'from flask import render_template, request, redirect, url_for\n'), ((1170, 1188), 'flask.url_for', 'url_for', (['"""failure"""'], {}), "('failure')\n", (1177, 1188), False, 'from flask import render_template, request, redirect, url_for\n'), ((288, 309), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (307, 309), False, 'import random\n'), ((1997, 2031), 'app.fetch.member_page', 'fetch.member_page', (["member['login']"], {}), "(member['login'])\n", (2014, 2031), False, 'from app import app, fetch, utils\n'), ((2230, 2273), 'app.fetch.member_contributions', 'fetch.member_contributions', (["member['login']"], {}), "(member['login'])\n", (2256, 2273), False, 'from app import app, fetch, utils\n')]
|
#!/usr/bin/env python
# As v1, but using scipy.sparse.diags instead of spdiags
"""
Functions for solving a 1D diffusion equations of simplest types
(constant coefficient, no source term):
u_t = a*u_xx on (0,L)
with boundary conditions u=0 on x=0,L, for t in (0,T].
Initial condition: u(x,0)=I(x).
The following naming convention of variables are used.
===== ==========================================================
Name Description
===== ==========================================================
Nx The total number of mesh cells; mesh points are numbered
from 0 to Nx.
F The dimensionless number a*dt/dx**2, which implicitly
specifies the time step.
T The stop time for the simulation.
I Initial condition (Python function of x).
a Variable coefficient (constant).
L Length of the domain ([0,L]).
x Mesh points in space.
t Mesh points in time.
n Index counter in time.
u Unknown at current/new time level.
u_n u at the previous time level.
dx Constant mesh spacing in x.
dt Constant mesh spacing in t.
===== ==========================================================
user_action is a function of (u, x, t, n), u[i] is the solution at
spatial mesh point x[i] at time t[n], where the calling code
can add visualization, error computations, data analysis,
store solutions, etc.
"""
import sys
import time
# import scitools.std as plt
import scipy.sparse
import scipy.sparse.linalg
import numpy as np
def solver_FE_simple(I, a, f, L, dt, F, T):
"""
Simplest expression of the computational algorithm
using the Forward Euler method and explicit Python loops.
For this method F <= 0.5 for stability.
"""
import time
t0 = time.perf_counter() # For measuring the CPU time
Nt = int(round(T/float(dt)))
t = np.linspace(0, Nt*dt, Nt+1) # Mesh points in time
dx = np.sqrt(a*dt/F)
Nx = int(round(L/dx))
x = np.linspace(0, L, Nx+1) # Mesh points in space
# Make sure dx and dt are compatible with x and t
dx = x[1] - x[0]
dt = t[1] - t[0]
u = np.zeros(Nx+1)
u_n = np.zeros(Nx+1)
# Set initial condition u(x,0) = I(x)
for i in range(0, Nx+1):
u_n[i] = I(x[i])
for n in range(0, Nt):
# Compute u at inner mesh points
for i in range(1, Nx):
u[i] = u_n[i] + F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) + \
dt*f(x[i], t[n])
# Insert boundary conditions
u[0] = 0
u[Nx] = 0
# Switch variables before next step
# u_n[:] = u # safe, but slow
u_n, u = u, u_n
t1 = time.perf_counter()
return u_n, x, t, t1-t0 # u_n holds latest u
def solver_FE(I, a, f, L, dt, F, T,
user_action=None, version='scalar'):
"""
Vectorized implementation of solver_FE_simple.
"""
t0 = time.perf_counter() # for measuring the CPU time
Nt = int(round(T/float(dt)))
t = np.linspace(0, Nt*dt, Nt+1) # Mesh points in time
dx = np.sqrt(a*dt/F)
Nx = int(round(L/dx))
x = np.linspace(0, L, Nx+1) # Mesh points in space
# Make sure dx and dt are compatible with x and t
dx = x[1] - x[0]
dt = t[1] - t[0]
u = np.zeros(Nx+1) # solution array
u_n = np.zeros(Nx+1) # solution at t-dt
# Set initial condition
for i in range(0, Nx+1):
u_n[i] = I(x[i])
if user_action is not None:
user_action(u_n, x, t, 0)
for n in range(0, Nt):
# Update all inner points
if version == 'scalar':
for i in range(1, Nx):
u[i] = u_n[i] +\
F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) +\
dt*f(x[i], t[n])
elif version == 'vectorized':
u[1:Nx] = u_n[1:Nx] + \
F*(u_n[0:Nx-1] - 2*u_n[1:Nx] + u_n[2:Nx+1]) +\
dt*f(x[1:Nx], t[n])
else:
raise ValueError('version=%s' % version)
# Insert boundary conditions
u[0] = 0
u[Nx] = 0
if user_action is not None:
user_action(u, x, t, n+1)
# Switch variables before next step
u_n, u = u, u_n
t1 = time.perf_counter()
return t1-t0
def solver_BE_simple(I, a, f, L, dt, F, T, user_action=None):
"""
Simplest expression of the computational algorithm
for the Backward Euler method, using explicit Python loops
and a dense matrix format for the coefficient matrix.
"""
import time
t0 = time.perf_counter() # for measuring the CPU time
Nt = int(round(T/float(dt)))
t = np.linspace(0, Nt*dt, Nt+1) # Mesh points in time
dx = np.sqrt(a*dt/F)
Nx = int(round(L/dx))
x = np.linspace(0, L, Nx+1) # Mesh points in space
# Make sure dx and dt are compatible with x and t
dx = x[1] - x[0]
dt = t[1] - t[0]
u = np.zeros(Nx+1)
u_n = np.zeros(Nx+1)
# Data structures for the linear system
A = np.zeros((Nx+1, Nx+1))
b = np.zeros(Nx+1)
for i in range(1, Nx):
A[i, i-1] = -F
A[i, i+1] = -F
A[i, i] = 1 + 2*F
A[0, 0] = A[Nx, Nx] = 1
# Set initial condition u(x,0) = I(x)
for i in range(0, Nx+1):
u_n[i] = I(x[i])
if user_action is not None:
user_action(u_n, x, t, 0)
for n in range(0, Nt):
# Compute b and solve linear system
for i in range(1, Nx):
b[i] = u_n[i] + dt*f(x[i], t[n+1])
b[0] = b[Nx] = 0
u[:] = np.linalg.solve(A, b)
if user_action is not None:
user_action(u, x, t, n+1)
# Update u_n before next step
u_n, u = u, u_n
t1 = time.perf_counter()
return t1-t0
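# --- Added note (illustrative, not in the original code) ---
# For Nx = 3 the dense system assembled in solver_BE_simple is
#
#   [[ 1,     0,     0,    0],     identity rows enforce u[0] = u[Nx] = 0,
#    [-F, 1+2*F,    -F,    0],     interior rows are the Backward Euler
#    [ 0,    -F, 1+2*F,   -F],     stencil -F*u[i-1] + (1+2*F)*u[i] - F*u[i+1]
#    [ 0,     0,     0,    1]]     = u_n[i] + dt*f(x[i], t[n+1]).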
def solver_BE(I, a, f, L, dt, F, T, user_action=None):
"""
Vectorized implementation of solver_BE_simple using also
a sparse (tridiagonal) matrix for efficiency.
"""
import time
t0 = time.perf_counter() # for measuring the CPU time
Nt = int(round(T/float(dt)))
t = np.linspace(0, Nt*dt, Nt+1) # Mesh points in time
dx = np.sqrt(a*dt/F)
Nx = int(round(L/dx))
x = np.linspace(0, L, Nx+1) # Mesh points in space
# Make sure dx and dt are compatible with x and t
dx = x[1] - x[0]
dt = t[1] - t[0]
u = np.zeros(Nx+1) # Solution array at t[n+1]
u_n = np.zeros(Nx+1) # Solution at t[n]
# Representation of sparse matrix and right-hand side
diagonal = np.zeros(Nx+1)
lower = np.zeros(Nx)
upper = np.zeros(Nx)
b = np.zeros(Nx+1)
# Precompute sparse matrix
diagonal[:] = 1 + 2*F
lower[:] = -F # 1
upper[:] = -F # 1
# Insert boundary conditions
diagonal[0] = 1
upper[0] = 0
diagonal[Nx] = 1
lower[-1] = 0
A = scipy.sparse.diags(
diagonals=[diagonal, lower, upper],
offsets=[0, -1, 1], shape=(Nx+1, Nx+1),
format='csr')
print(A.todense())
# Set initial condition
for i in range(0, Nx+1):
u_n[i] = I(x[i])
if user_action is not None:
user_action(u_n, x, t, 0)
for n in range(0, Nt):
b = u_n + dt*f(x[:], t[n+1])
b[0] = b[-1] = 0.0 # boundary conditions
u[:] = scipy.sparse.linalg.spsolve(A, b)
if user_action is not None:
user_action(u, x, t, n+1)
# Update u_n before next step
# u_n[:] = u
u_n, u = u, u_n
t1 = time.perf_counter()
return t1-t0
def solver_theta(I, a, f, L, dt, F, T, theta=0.5, u_L=0, u_R=0,
user_action=None):
"""
Full solver for the model problem using the theta-rule
difference approximation in time (no restriction on F,
i.e., the time step when theta >= 0.5).
Vectorized implementation and sparse (tridiagonal)
coefficient matrix.
"""
import time
t0 = time.perf_counter() # for measuring the CPU time
Nt = int(round(T/float(dt)))
t = np.linspace(0, Nt*dt, Nt+1) # Mesh points in time
dx = np.sqrt(a*dt/F)
Nx = int(round(L/dx))
x = np.linspace(0, L, Nx+1) # Mesh points in space
# Make sure dx and dt are compatible with x and t
dx = x[1] - x[0]
dt = t[1] - t[0]
u = np.zeros(Nx+1) # solution array at t[n+1]
u_n = np.zeros(Nx+1) # solution at t[n]
# Representation of sparse matrix and right-hand side
diagonal = np.zeros(Nx+1)
lower = np.zeros(Nx)
upper = np.zeros(Nx)
b = np.zeros(Nx+1)
# Precompute sparse matrix (scipy format)
Fl = F*theta
Fr = F*(1-theta)
diagonal[:] = 1 + 2*Fl
lower[:] = -Fl # 1
upper[:] = -Fl # 1
# Insert boundary conditions
diagonal[0] = 1
upper[0] = 0
diagonal[Nx] = 1
lower[-1] = 0
diags = [0, -1, 1]
A = scipy.sparse.diags(
diagonals=[diagonal, lower, upper],
offsets=[0, -1, 1], shape=(Nx+1, Nx+1),
format='csr')
print(A.todense())
# Set initial condition
for i in range(0, Nx+1):
u_n[i] = I(x[i])
if user_action is not None:
user_action(u_n, x, t, 0)
# Time loop
for n in range(0, Nt):
b[1:-1] = u_n[1:-1] + \
Fr*(u_n[:-2] - 2*u_n[1:-1] + u_n[2:]) + \
dt*theta*f(x[1:-1], t[n+1]) + \
dt*(1-theta)*f(x[1:-1], t[n])
b[0] = u_L # Boundary conditions
b[-1] = u_R
u[:] = scipy.sparse.linalg.spsolve(A, b)
if user_action is not None:
user_action(u, x, t, n+1)
# Update u_n before next step
u_n, u = u, u_n
t1 = time.perf_counter()
return t1-t0
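# --- Added illustrative alias (not part of the original module) ---
# solver_theta generalizes the schemes above: theta=0 reproduces Forward
# Euler, theta=1 Backward Euler, and theta=0.5 Crank-Nicolson.
import functools
solver_CN = functools.partial(solver_theta, theta=0.5)  # hypothetical convenience name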
def viz(I, a, L, dt, F, T, umin, umax,
        scheme='FE', animate=True, framefiles=True, f=None):
    if f is None:
        f = lambda x, t: 0  # no source term unless one is supplied
    import matplotlib.pyplot as plt  # the scitools import above is commented out
    def plot_u(u, x, t, n):
        plt.clf()
        plt.plot(x, u, 'r-')
        plt.axis([0, L, umin, umax])
        plt.title('t=%f' % t[n])
        plt.draw()
        plt.pause(0.001)
if framefiles:
plt.savefig('tmp_frame%04d.png' % n)
if t[n] == 0:
time.sleep(2)
elif not framefiles:
# It takes time to write files so pause is needed
# for screen only animation
time.sleep(0.2)
user_action = plot_u if animate else lambda u, x, t, n: None
    cpu = eval('solver_'+scheme)(I, a, f, L, dt, F, T,
                                 user_action=user_action)
return cpu
def plug(scheme='FE', F=0.5, Nx=50):
L = 1.
a = 1.
T = 0.1
# Compute dt from Nx and F
dx = L/Nx
dt = F/a*dx**2
def I(x):
"""Plug profile as initial condition."""
if abs(x-L/2.0) > 0.1:
return 0
else:
return 1
cpu = viz(I, a, L, dt, F, T,
umin=-0.1, umax=1.1,
scheme=scheme, animate=True, framefiles=True)
print('CPU time:', cpu)
def gaussian(scheme='FE', F=0.5, Nx=50, sigma=0.05):
L = 1.
a = 1.
T = 0.1
# Compute dt from Nx and F
dx = L/Nx
dt = F/a*dx**2
def I(x):
"""Gaussian profile as initial condition."""
        return np.exp(-0.5*((x-L/2.0)**2)/sigma**2)
    cpu = viz(I, a, L, dt, F, T,
umin=-0.1, umax=1.1,
scheme=scheme, animate=True, framefiles=True)
print('CPU time:', cpu)
def expsin(scheme='FE', F=0.5, m=3):
L = 10.0
a = 1
T = 1.2
def exact(x, t):
        return np.exp(-m**2*np.pi**2*a/L**2*t)*np.sin(m*np.pi/L*x)
def I(x):
return exact(x, 0)
Nx = 80
# Compute dt from Nx and F
dx = L/Nx
dt = F/a*dx**2
viz(I, a, L, dt, F, T, -1, 1, scheme=scheme, animate=True,
framefiles=True)
# Convergence study
def action(u, x, t, n):
e = abs(u - exact(x, t[n])).max()
errors.append(e)
errors = []
Nx_values = [10, 20, 40, 80, 160]
    for Nx in Nx_values:
        dt = F*(L/Nx)**2/a   # compute dt from Nx and F before solving
        # zero source term: the exact solution solves the homogeneous equation
        eval('solver_'+scheme)(I, a, lambda x, t: 0, L, dt, F, T,
                                user_action=action)
        print(dt, errors[-1])
def test_solvers():
def u_exact(x, t):
return x*(L-x)*5*t # fulfills BC at x=0 and x=L
def I(x):
return u_exact(x, 0)
def f(x, t):
return 5*x*(L-x) + 10*a*t
a = 3.5
L = 1.5
Nx = 4
F = 0.5
# Compute dt from Nx and F
dx = L/Nx
dt = F/a*dx**2
def compare(u, x, t, n): # user_action function
"""Compare exact and computed solution."""
u_e = u_exact(x, t[n])
diff = abs(u_e - u).max()
tol = 1E-14
assert diff < tol, 'max diff: %g' % diff
import functools
s = functools.partial # object for calling a function w/args
solvers = [
s(solver_FE_simple, I=I, a=a, f=f, L=L, dt=dt, F=F, T=0.2),
s(solver_FE, I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
user_action=compare, version='scalar'),
s(solver_FE, I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
user_action=compare, version='vectorized'),
s(solver_BE_simple, I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
user_action=compare),
s(solver_BE, I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
user_action=compare),
s(solver_theta, I=I, a=a, f=f, L=L, dt=dt, F=F, T=2,
theta=0, u_L=0, u_R=0, user_action=compare),
]
# solver_FE_simple has different return from the others
u, x, t, cpu = solvers[0]()
u_e = u_exact(x, t[-1])
diff = abs(u_e - u).max()
tol = 1E-14
print(u_e)
print(u)
assert diff < tol, 'max diff solver_FE_simple: %g' % diff
for solver in solvers:
solver()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("""Usage %s function arg1 arg2 arg3 ...""" % sys.argv[0])
sys.exit(0)
cmd = '%s(%s)' % (sys.argv[1], ', '.join(sys.argv[2:]))
print(cmd)
eval(cmd)
|
[
"numpy.zeros",
"time.perf_counter",
"time.sleep",
"numpy.linspace",
"numpy.linalg.solve",
"sys.exit",
"numpy.sqrt"
] |
[((1720, 1739), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1737, 1739), False, 'import time\n'), ((1812, 1843), 'numpy.linspace', 'np.linspace', (['(0)', '(Nt * dt)', '(Nt + 1)'], {}), '(0, Nt * dt, Nt + 1)\n', (1823, 1843), True, 'import numpy as np\n'), ((1873, 1892), 'numpy.sqrt', 'np.sqrt', (['(a * dt / F)'], {}), '(a * dt / F)\n', (1880, 1892), True, 'import numpy as np\n'), ((1923, 1948), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (1934, 1948), True, 'import numpy as np\n'), ((2081, 2097), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (2089, 2097), True, 'import numpy as np\n'), ((2106, 2122), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (2114, 2122), True, 'import numpy as np\n'), ((2612, 2631), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2629, 2631), False, 'import time\n'), ((2847, 2866), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2864, 2866), False, 'import time\n'), ((2939, 2970), 'numpy.linspace', 'np.linspace', (['(0)', '(Nt * dt)', '(Nt + 1)'], {}), '(0, Nt * dt, Nt + 1)\n', (2950, 2970), True, 'import numpy as np\n'), ((3000, 3019), 'numpy.sqrt', 'np.sqrt', (['(a * dt / F)'], {}), '(a * dt / F)\n', (3007, 3019), True, 'import numpy as np\n'), ((3050, 3075), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (3061, 3075), True, 'import numpy as np\n'), ((3208, 3224), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (3216, 3224), True, 'import numpy as np\n'), ((3251, 3267), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (3259, 3267), True, 'import numpy as np\n'), ((4179, 4198), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4196, 4198), False, 'import time\n'), ((4497, 4516), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4514, 4516), False, 'import time\n'), ((4589, 4620), 'numpy.linspace', 'np.linspace', (['(0)', '(Nt * dt)', '(Nt + 1)'], {}), '(0, Nt * dt, Nt + 1)\n', (4600, 4620), True, 'import numpy as np\n'), ((4650, 4669), 'numpy.sqrt', 'np.sqrt', (['(a * dt / F)'], {}), '(a * dt / F)\n', (4657, 4669), True, 'import numpy as np\n'), ((4700, 4725), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (4711, 4725), True, 'import numpy as np\n'), ((4858, 4874), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (4866, 4874), True, 'import numpy as np\n'), ((4883, 4899), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (4891, 4899), True, 'import numpy as np\n'), ((4951, 4977), 'numpy.zeros', 'np.zeros', (['(Nx + 1, Nx + 1)'], {}), '((Nx + 1, Nx + 1))\n', (4959, 4977), True, 'import numpy as np\n'), ((4982, 4998), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (4990, 4998), True, 'import numpy as np\n'), ((5648, 5667), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5665, 5667), False, 'import time\n'), ((5894, 5913), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5911, 5913), False, 'import time\n'), ((5986, 6017), 'numpy.linspace', 'np.linspace', (['(0)', '(Nt * dt)', '(Nt + 1)'], {}), '(0, Nt * dt, Nt + 1)\n', (5997, 6017), True, 'import numpy as np\n'), ((6047, 6066), 'numpy.sqrt', 'np.sqrt', (['(a * dt / F)'], {}), '(a * dt / F)\n', (6054, 6066), True, 'import numpy as np\n'), ((6097, 6122), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (6108, 6122), True, 'import numpy as np\n'), ((6255, 6271), 'numpy.zeros', 'np.zeros', 
(['(Nx + 1)'], {}), '(Nx + 1)\n', (6263, 6271), True, 'import numpy as np\n'), ((6308, 6324), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (6316, 6324), True, 'import numpy as np\n'), ((6417, 6433), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (6425, 6433), True, 'import numpy as np\n'), ((6444, 6456), 'numpy.zeros', 'np.zeros', (['Nx'], {}), '(Nx)\n', (6452, 6456), True, 'import numpy as np\n'), ((6469, 6481), 'numpy.zeros', 'np.zeros', (['Nx'], {}), '(Nx)\n', (6477, 6481), True, 'import numpy as np\n'), ((6490, 6506), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (6498, 6506), True, 'import numpy as np\n'), ((7367, 7386), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7384, 7386), False, 'import time\n'), ((7788, 7807), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7805, 7807), False, 'import time\n'), ((7880, 7911), 'numpy.linspace', 'np.linspace', (['(0)', '(Nt * dt)', '(Nt + 1)'], {}), '(0, Nt * dt, Nt + 1)\n', (7891, 7911), True, 'import numpy as np\n'), ((7941, 7960), 'numpy.sqrt', 'np.sqrt', (['(a * dt / F)'], {}), '(a * dt / F)\n', (7948, 7960), True, 'import numpy as np\n'), ((7991, 8016), 'numpy.linspace', 'np.linspace', (['(0)', 'L', '(Nx + 1)'], {}), '(0, L, Nx + 1)\n', (8002, 8016), True, 'import numpy as np\n'), ((8149, 8165), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (8157, 8165), True, 'import numpy as np\n'), ((8203, 8219), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (8211, 8219), True, 'import numpy as np\n'), ((8313, 8329), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (8321, 8329), True, 'import numpy as np\n'), ((8340, 8352), 'numpy.zeros', 'np.zeros', (['Nx'], {}), '(Nx)\n', (8348, 8352), True, 'import numpy as np\n'), ((8365, 8377), 'numpy.zeros', 'np.zeros', (['Nx'], {}), '(Nx)\n', (8373, 8377), True, 'import numpy as np\n'), ((8386, 8402), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (8394, 8402), True, 'import numpy as np\n'), ((9502, 9521), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9519, 9521), False, 'import time\n'), ((5478, 5499), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (5493, 5499), True, 'import numpy as np\n'), ((13485, 13496), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (13493, 13496), False, 'import sys\n'), ((9860, 9873), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9870, 9873), False, 'import time\n'), ((10017, 10032), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (10027, 10032), False, 'import time\n')]
|
def read_maze(file_name):
"""
Reads a .txt file that must have the following format:
First line - number_of_lines number_of_columns
Second line - line_of_origin_point column_of_origin_point
Third line - line_of_destination_point column_of_destination_point
Maze -> where 0 = empty space and -1 = wall
Example of file format:
6 11
1 1
6 11
0 0 -1 -1 0 0 0 0 0 0 0
0 -1 0 -1 -1 0 -1 0 -1 -1 0
0 0 -1 0 0 0 -1 0 0 0 0
0 -1 0 -1 0 0 0 -1 -1 0 0
0 0 0 -1 0 -1 0 0 0 -1 0
0 -1 0 0 0 0 0 0 -1 0 0
"""
file = open(file_name, 'r')
nlin, ncol = file.readline().split()
# Reads the first line which contains the number of lines, columns in the maze
nlin, ncol = int(nlin), int(ncol)
lin_origin, col_origin = file.readline().split()
# Reads the second line which contains the location of the starting point in the maze
lin_origin, col_origin = int(lin_origin), int(col_origin)
lin_end, col_end = file.readline().split()
# Reads the third line which contains the location of the ending point in the maze
lin_end, col_end = int(lin_end), int(col_end)
maze = []
line = [-1] * (ncol + 2)
for i in range(nlin + 2):
maze.append(line[:])
for i in range(nlin):
line = file.readline().split()
line = [int(value) for value in line]
maze[i + 1][1:-1] = line[:]
    maze[lin_end][col_end] = 1
    file.close()
    return maze, (lin_origin, col_origin), (lin_end, col_end)
def mark_maze(matrix, destination):
"""Marks the distance of the nodes in relation to the destination"""
marks = [destination]
start = end = 1
while start <= end:
line = marks[start - 1][0]
column = marks[start - 1][1]
num = matrix[line][column] + 1
if matrix[line - 1][column] == 0:
matrix[line - 1][column] = num
marks.append((line - 1, column))
if matrix[line + 1][column] == 0:
matrix[line + 1][column] = num
marks.append((line + 1, column))
if matrix[line][column - 1] == 0:
matrix[line][column - 1] = num
marks.append((line, column - 1))
if matrix[line][column + 1] == 0:
matrix[line][column + 1] = num
marks.append((line, column + 1))
start += 1
end = len(marks)
return matrix
def solve_maze(matrix, origin, destination, export=False):
"""Find a path that links the origin with the destination
in a maze in the format of a matrix"""
path = [origin]
origin_line, origin_col = origin[0], origin[1]
destination_line, destination_col = destination[0], destination[1]
last_value = matrix[origin_line][origin_col]
while last_value > 1:
line = path[-1][0]
column = path[-1][1]
if matrix[line - 1][column] == last_value - 1:
last_value = matrix[line - 1][column]
path.append((line - 1, column))
elif matrix[line + 1][column] == last_value - 1:
last_value = matrix[line + 1][column]
path.append((line + 1, column))
elif matrix[line][column - 1] == last_value - 1:
last_value = matrix[line][column - 1]
path.append((line, column - 1))
elif matrix[line][column + 1] == last_value - 1:
last_value = matrix[line][column + 1]
path.append((line, column + 1))
if len(path) == 1 or path[-1] != destination: # No solution
return None
wall = 'X'
solution = '+'
for lin in range(len(matrix)):
for col in range(len(matrix[lin])):
if matrix[lin][col] == -1:
matrix[lin][col] = wall
elif matrix[lin][col] > -1:
matrix[lin][col] = ' '
for node in path:
lin_node = node[0]
col_node = node[1]
matrix[lin_node][col_node] = solution
matrix[origin_line][origin_col] = 'A'
matrix[destination_line][destination_col] = 'B'
if export:
export_maze(matrix)
return matrix
def export_maze(maze):
"""Export the maze solution to a .txt file"""
file_name = input('Output file name: ')
with open(file_name, 'w') as file:
for line in maze:
for pos, value in enumerate(line):
if pos == len(line) - 1:
file.write(value)
else:
file.write(value + '\t')
file.write('\n')
def color_path(point):
return '\033[1;31;40m' + point + '\033[0m' # Colors the terminal in red
def print_solution(solved_maze):
"""Prints the solved maze with a red path to the terminal"""
for line in solved_maze:
for pos, value in enumerate(line):
if value == '+':
print(color_path(value), end='\t')
else:
print(value, end='\t')
print()
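# --- Added illustrative sketch (not part of the original file) ---
# The marking/solving pipeline above can be exercised on a hand-built maze
# that follows the read_maze conventions: a border of -1 walls, 0 for free
# cells and 1 at the destination.  All names below are hypothetical.
def demo_tiny_maze():
    maze = [[-1, -1, -1, -1, -1],
            [-1,  0,  0,  1, -1],   # destination (1, 3) pre-marked with 1
            [-1, -1, -1, -1, -1]]
    origin, destination = (1, 1), (1, 3)
    marked = mark_maze(matrix=maze, destination=destination)
    solved = solve_maze(matrix=marked, origin=origin, destination=destination)
    if solved:
        print_solution(solved)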
def run_maze_solver(export=False):
import os
maze_file = input('Path of the .txt containing the maze: ')
file_extension = os.path.splitext(maze_file)[1]
while not os.path.exists(maze_file) or file_extension != '.txt':
if not os.path.exists(maze_file):
print(f'There is no file with this path "{maze_file}"')
else:
print('Your file must be in .txt format!')
print()
maze_file = input('Path of the .txt containing the maze: ')
maze, origin, destination = read_maze(maze_file)
marked_maze = mark_maze(matrix=maze, destination=destination)
solved_maze = solve_maze(matrix=marked_maze, origin=origin, destination=destination, export=export)
if solved_maze: # Check if there is a solution to the maze
print_solution(solved_maze=solved_maze)
else:
print(f'There is no solution for the maze "{maze_file}".')
if __name__ == '__main__':
run_maze_solver(export=False)
|
[
"os.path.exists",
"os.path.splitext"
] |
[((5282, 5309), 'os.path.splitext', 'os.path.splitext', (['maze_file'], {}), '(maze_file)\n', (5298, 5309), False, 'import os\n'), ((5328, 5353), 'os.path.exists', 'os.path.exists', (['maze_file'], {}), '(maze_file)\n', (5342, 5353), False, 'import os\n'), ((5399, 5424), 'os.path.exists', 'os.path.exists', (['maze_file'], {}), '(maze_file)\n', (5413, 5424), False, 'import os\n')]
|
import copy
from typing import List
import numpy as np
from numpy.core import ndarray
from .hg import Histogram
# import matplotlib.pyplot as plt
to_print = False
class NewForest:
"""This creates a forest of trees, given the following list of parameters:
1) n_trees: the number of trees
2) max_depth: the depth of the trees
3) max_samples: number of samples per tree
4) max_buckets: maximum number of buckets used by the histogram
5) epsilon: accuracy of the histogram"""
dim = ... # type: int
size = ... # type: int
points = ... # type: ndarray
start = ... # type: ndarray
end = ... # type: ndarray
def __init__(self, **kwargs):
self.n_trees = kwargs['n_trees']
self.max_depth = kwargs['max_depth']
self.max_samples = kwargs['max_samples']
self.max_buckets = kwargs['max_buckets']
self.epsilon = kwargs['epsilon']
self.sample_axis = kwargs['sample_axis']
self.threshold = kwargs['threshold']
self.tree = []
self.bucket_profile = np.zeros(self.max_buckets)
self.num_leaves = 0
def fit(self, pts):
self.points = pts
self.dim, self.size = np.shape(self.points)
if int(self.sample_axis * self.dim) == 0:
print("sample_axis is too low")
return
self.start = np.zeros((self.dim, 1))
self.end = np.zeros((self.dim, 1))
for axis in range(self.dim):
val = np.unique(np.array(self.points[axis]))
if len(val) <= 1:
print("No entropy in dimension :", axis)
return
self.start[axis] = (3 * val[0] - val[1]) / 2
self.end[axis] = (3 * val[-1] - val[-2]) / 2
k_args = {'depth': 0, 'forest': self}
sample = np.random.choice(self.size, self.max_depth * 50, replace=False)
for i in range(self.n_trees):
k_args['indices'] = np.random.choice(self.size, self.max_samples, replace=False)
root_node = Node(**k_args)
root_node.compute_density(sample)
self.tree.append(root_node)
    def plt_scores(self, pts):
        # local import: the module-level matplotlib import above is commented out
        import matplotlib.pyplot as plt
        _, n_pts = np.shape(pts)
n_show = int(2 * self.n_trees / 3)
scores = np.zeros((self.n_trees, n_pts)) # need to do this streaming
indices = [i for i in range(n_pts)]
for i in range(self.n_trees):
self.tree[i].compute_split(pts, indices, scores[i])
for i in range(n_pts):
plt.plot(np.sort(scores[:, i])[:n_show])
plt.show()
def predict(self, pts, err=0.1, pct=50):
_, n_pts = np.shape(pts)
scores = np.zeros((self.n_trees, n_pts)) # need to do this streaming
indices = np.arange(n_pts)
for i in range(self.n_trees):
self.tree[i].compute_split(pts, indices, scores[i])
n_err = int(err * n_pts)
min_score = np.percentile(scores, pct, axis=0)
top_indices = np.argsort(min_score)[:n_err]
anom_pts = {}
anom_scores = {}
anom_pct = {}
for i in range(n_err):
anom_pts[top_indices[i]] = pts[:, top_indices[i]]
anom_scores[top_indices[i]] = scores[:, top_indices[i]]
anom_pct[top_indices[i]] = min_score[top_indices[i]]
return top_indices, anom_pts, anom_scores, anom_pct
class PointSet:
def __init__(self, node, indices):
self.node = node
self.indices = indices
self.val = []
self.count = []
self.gap = []
for axis in range(self.node.forest.dim):
val, count = np.unique(np.array(self.node.forest.points[axis, self.indices]), return_counts=True)
self.val.append(val)
self.count.append(count)
if len(val) <= 1:
gap = [0]
else:
gap = np.zeros(len(val))
gap[0] = (val[0] + val[1]) / 2 - self.node.cube.start[axis]
gap[-1] = self.node.cube.end[axis] - (val[-1] + val[-2]) / 2
for i in range(1, len(val) - 1):
gap[i] = (val[i + 1] - val[i - 1]) / 2
self.gap.append(gap)
class Cube:
def __init__(self, node, start, end):
assert isinstance(node, Node)
self.node = node
self.child = []
self.start = start
self.end = end
self.dim = len(start)
self.split_axis = -1
self.split_vals = []
self.vol = 1
for i in range(self.dim):
self.vol *= (self.end[i] - self.start[i])
def filter_indices(self, indices):
in_lb = self.node.forest.points[:, indices] >= self.start.reshape(self.dim, 1)
in_ub = self.node.forest.points[:, indices] < self.end.reshape(self.dim, 1)
return [indices[i] for i in range(len(indices)) if in_lb[:, i].all() and in_ub[:, i].all()]
def split_indices(self, pts, indices):
if not self.child:
return indices
n_child = len(self.child)
n_arr = len(indices)
if n_arr == 0:
return [[] for _ in range(n_child)]
s_arr = pts[self.split_axis]
s_start = self.start[self.split_axis]
s_end = self.end[self.split_axis]
index_split = [[] for _ in range(n_child)]
index_split[0] = [indices[ind] for ind in range(n_arr) if ((s_arr[ind] >= s_start) and
(s_arr[ind] < self.split_vals[0]))]
index_split[-1] = [indices[ind] for ind in range(n_arr) if ((s_arr[ind] >= self.split_vals[-1]) and
(s_arr[ind] < s_end))]
for k in range(1, n_child - 1):
index_split[k] = [indices[ind] for ind in range(n_arr) if (s_arr[ind] >= self.split_vals[k - 1]) and
(s_arr[ind] < self.split_vals[k])]
return index_split
class Node:
def __init__(self, depth, forest, **kwargs):
self.depth = depth
self.forest = forest
if self.depth == 0:
self.id_string = [0]
self.cube = Cube(self, self.forest.start, self.forest.end)
self.point_set = PointSet(self, kwargs['indices'])
else:
self.id_string = kwargs['id']
self.cube = Cube(self, kwargs['start'], kwargs['end'])
self.point_set = PointSet(self, self.cube.filter_indices(kwargs['indices']))
self.density = -1
self.child = []
if (self.depth < self.forest.max_depth) and (len(self.point_set.indices) > 1):
self.split_node()
def split_node(self):
imp_axis = [axis for axis in range(self.cube.dim) if len(self.point_set.val[axis]) > 1]
if not imp_axis:
return
max_axes = min(len(imp_axis), int(self.forest.sample_axis * self.cube.dim))
s_axes = np.random.choice(imp_axis, max_axes, replace=False)
buckets = {}
var_red = {}
for axis in s_axes:
hist = Histogram(self.point_set.gap[axis] / self.point_set.count[axis], self.point_set.count[axis],
self.forest.max_buckets, self.forest.epsilon)
opt_buckets, var_red[axis], buckets[axis] = hist.best_split()
if np.max(list(var_red.values())) <= self.forest.threshold:
return
self.forest.bucket_profile[opt_buckets - 1] += 1
self.forest.num_leaves += opt_buckets
split_axis = np.random.choice(s_axes, p=list(var_red.values()) / np.sum(list(var_red.values())))
self.cube.split_axis = split_axis
self.cube.split_vals = [(self.point_set.val[split_axis][i - 1] + self.point_set.val[split_axis][i]) / 2 for i in
buckets[split_axis]]
for i in range(len(self.cube.split_vals) + 1):
new_start = np.array(self.cube.start)
new_end = np.array(self.cube.end)
if 0 < i < len(self.cube.split_vals):
new_start[split_axis] = self.cube.split_vals[i - 1]
new_end[split_axis] = self.cube.split_vals[i]
elif i == 0:
new_end[split_axis] = self.cube.split_vals[0]
else: # i == len(self.cube.split_vals)
new_start[split_axis] = self.cube.split_vals[-1]
new_id = copy.deepcopy(self.id_string)
new_id.append(i)
kwargs = {'start': new_start, 'end': new_end}
kwargs.update({'indices': self.point_set.indices, 'id': new_id})
child_node = Node(self.depth + 1, self.forest, **kwargs)
self.child.append(child_node)
self.cube.child.append(child_node.cube)
def compute_density(self, indices):
if len(indices) == 0:
self.density = 0
self.child = []
self.cube.child = []
self.cube.split_axis = -1
return
n_arr = len(indices)
self.density = n_arr / self.cube.vol
if self.child:
index_split = self.cube.split_indices(self.forest.points, indices)
for i in range(len(self.child)):
self.child[i].compute_density(index_split[i])
def compute_split(self, pts, indices, scores):
if self.child:
index_split = self.cube.split_indices(pts, indices)
for i in range(len(self.child)):
if index_split[i]:
self.child[i].compute_split(pts, index_split[i], scores)
else:
scores[indices] = self.density
def __str__(self):
str_val = "Id: " + str(self.id_string) + "\n"
str_val += "Boundary: "
for i in range(self.cube.dim):
str_val += " [" + str(self.cube.start[i]) + ", " + str(self.cube.end[i]) + "]"
if i < self.cube.dim - 1:
str_val += " x"
else:
str_val += "\n"
str_val += "Indices: " + str(self.point_set.indices) + "\n"
return str_val
def print_node(self):
print_list = [self]
while print_list:
node = print_list.pop(0)
print(str(node))
print_list.extend(node.child)
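# --- Added usage sketch (not part of the original module); the parameter
# values and names below are arbitrary illustrations ---
def _example_forest():
    rng = np.random.RandomState(0)
    pts = rng.rand(2, 2000)                 # data layout is (dim, n_points)
    forest = NewForest(n_trees=10, max_depth=6, max_samples=500,
                       max_buckets=3, epsilon=0.1,
                       sample_axis=1.0, threshold=0.0)
    forest.fit(pts)
    top_indices, anom_pts, anom_scores, anom_pct = forest.predict(pts, err=0.05)
    return top_indices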
|
[
"copy.deepcopy",
"numpy.zeros",
"numpy.shape",
"numpy.percentile",
"numpy.argsort",
"numpy.sort",
"numpy.arange",
"numpy.array",
"numpy.random.choice"
] |
[((1059, 1085), 'numpy.zeros', 'np.zeros', (['self.max_buckets'], {}), '(self.max_buckets)\n', (1067, 1085), True, 'import numpy as np\n'), ((1195, 1216), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (1203, 1216), True, 'import numpy as np\n'), ((1351, 1374), 'numpy.zeros', 'np.zeros', (['(self.dim, 1)'], {}), '((self.dim, 1))\n', (1359, 1374), True, 'import numpy as np\n'), ((1394, 1417), 'numpy.zeros', 'np.zeros', (['(self.dim, 1)'], {}), '((self.dim, 1))\n', (1402, 1417), True, 'import numpy as np\n'), ((1799, 1862), 'numpy.random.choice', 'np.random.choice', (['self.size', '(self.max_depth * 50)'], {'replace': '(False)'}), '(self.size, self.max_depth * 50, replace=False)\n', (1815, 1862), True, 'import numpy as np\n'), ((2170, 2183), 'numpy.shape', 'np.shape', (['pts'], {}), '(pts)\n', (2178, 2183), True, 'import numpy as np\n'), ((2244, 2275), 'numpy.zeros', 'np.zeros', (['(self.n_trees, n_pts)'], {}), '((self.n_trees, n_pts))\n', (2252, 2275), True, 'import numpy as np\n'), ((2623, 2636), 'numpy.shape', 'np.shape', (['pts'], {}), '(pts)\n', (2631, 2636), True, 'import numpy as np\n'), ((2654, 2685), 'numpy.zeros', 'np.zeros', (['(self.n_trees, n_pts)'], {}), '((self.n_trees, n_pts))\n', (2662, 2685), True, 'import numpy as np\n'), ((2733, 2749), 'numpy.arange', 'np.arange', (['n_pts'], {}), '(n_pts)\n', (2742, 2749), True, 'import numpy as np\n'), ((2905, 2939), 'numpy.percentile', 'np.percentile', (['scores', 'pct'], {'axis': '(0)'}), '(scores, pct, axis=0)\n', (2918, 2939), True, 'import numpy as np\n'), ((6879, 6930), 'numpy.random.choice', 'np.random.choice', (['imp_axis', 'max_axes'], {'replace': '(False)'}), '(imp_axis, max_axes, replace=False)\n', (6895, 6930), True, 'import numpy as np\n'), ((1933, 1993), 'numpy.random.choice', 'np.random.choice', (['self.size', 'self.max_samples'], {'replace': '(False)'}), '(self.size, self.max_samples, replace=False)\n', (1949, 1993), True, 'import numpy as np\n'), ((2962, 2983), 'numpy.argsort', 'np.argsort', (['min_score'], {}), '(min_score)\n', (2972, 2983), True, 'import numpy as np\n'), ((7852, 7877), 'numpy.array', 'np.array', (['self.cube.start'], {}), '(self.cube.start)\n', (7860, 7877), True, 'import numpy as np\n'), ((7900, 7923), 'numpy.array', 'np.array', (['self.cube.end'], {}), '(self.cube.end)\n', (7908, 7923), True, 'import numpy as np\n'), ((8329, 8358), 'copy.deepcopy', 'copy.deepcopy', (['self.id_string'], {}), '(self.id_string)\n', (8342, 8358), False, 'import copy\n'), ((1483, 1510), 'numpy.array', 'np.array', (['self.points[axis]'], {}), '(self.points[axis])\n', (1491, 1510), True, 'import numpy as np\n'), ((3612, 3665), 'numpy.array', 'np.array', (['self.node.forest.points[axis, self.indices]'], {}), '(self.node.forest.points[axis, self.indices])\n', (3620, 3665), True, 'import numpy as np\n'), ((2503, 2524), 'numpy.sort', 'np.sort', (['scores[:, i]'], {}), '(scores[:, i])\n', (2510, 2524), True, 'import numpy as np\n')]
|
import sys, csv, os
import math, random
import z3
import xml.etree.ElementTree as ET
#######
# XES #
#######
xes_ns = { 'xes': 'rttp://www.w3.org/2001/XMLSchema' }
def node_from_key(root, key):
for atr in root:
if 'key' in atr.attrib and atr.attrib['key'] == key:
return atr
return None
def value_from_key(root, key):
for atr in root:
if 'key' in atr.attrib and atr.attrib['key'] == key:
return atr.attrib['value']
return None
#########
# UTILS #
#########
def Hellinger_distance(P, Q):
"""
    Hellinger distance between two probability distributions.
"""
dist = 0.0
for p, q in zip(P, Q):
dist += (math.sqrt(p) - math.sqrt(q)) ** 2
dist = math.sqrt(dist)
dist /= math.sqrt(2)
return dist
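# Added worked example (illustrative): for P = [1, 0] and Q = [0, 1] the sum of
# squared root-differences is 2, so the distance is sqrt(2)/sqrt(2) = 1
# (maximal), while identical distributions give 0.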
def manhattan_distance(x1, y1, x2, y2):
return abs(x1 - x2) + abs(y1 - y2)
def euclidean_distance(x1, y1, x2, y2):
return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
def to_real(x):
"""
Convert Z3 Fractional numbers into floating points
"""
return float(x.numerator_as_long()/x.denominator_as_long())
#############################
# VELOCITY REGULATION RULES #
#############################
class DummyVar:
"""
Class that represent a dummy variable introduced by the MAX-SMT step.
    It contains the literal (a Boolean variable) that identifies the dummy
    variable inside the SMT problem and the information about which
    rule, run and step is encoded by the variable.
"""
def __init__(self, literal, rule, run, step):
self.literal = literal
self.rule = rule
self.run = run
self.step = step
class Rock:
def __init__(self, x, y, num):
self.x = x
self.y = y
self.num = num
class RuleSynth:
"""
Synthetize rules from runs of an POMCP algorithm
"""
def __init__(self, xes_log, threshold):
self.xes_log = xes_log
self.xes_tree = ET.parse(xes_log)
self.sample_confidence = 1.0
self.threshold = threshold
self.rules = 3
self.rocknum = -1
self.size = -1
self.rocks = []
self.beliefs = []
self.actions = []
self.positions = []
self.runs = []
self.steps = []
self.collected = []
self.build_from_xes()
self.solver = z3.Optimize()
self.thresholds = [[] for i in range(self.rules)]
self.soft_constr = [[] for i in range(self.rules)]
def is_rock_pos(self, x, y):
for r in self.rocks:
if r.x == x and r.y == y:
return True
return False
def rock_number(self, x, y):
"""
return the number of the rock in position (x, y), or -1 if there is
no rock in (x, y)
"""
for r in self.rocks:
if r.x == x and r.y == y:
return r.num
return -1
def build_from_xes(self):
"""
Parse xes log and build data from traces
"""
log = self.xes_tree.getroot()
self.size = int(value_from_key(log,'Size'))
self.rocknum = int(value_from_key(log,'NumRocks'))
for rock in node_from_key(log, 'rocks'):
x = int(value_from_key(rock, 'coord x'))
y = int(value_from_key(rock, 'coord y'))
num = int(value_from_key(rock, 'number'))
self.rocks.append(Rock(x=x, y=y, num=num))
for trace in log.findall('xes:trace', xes_ns):
run = int(value_from_key(trace, 'run'))
step = 1
for event in trace.findall('xes:event', xes_ns):
if step == 1:
self.collected.append([0 for i in range(0, self.rocknum)])
else:
collected = [i for i in self.collected[-1]]
if self.actions[-1] == 'sample':
pos = self.positions[-1]
num = self.rock_number(pos[0], pos[1])
if num != -1:
collected[num] = 1
self.collected.append(collected)
self.runs.append(run)
self.steps.append(step)
step +=1
x = int(value_from_key(event,'coord x'))
y = int(value_from_key(event,'coord y'))
self.positions.append([x, y])
action = value_from_key(event,'action')
self.actions.append(action)
# belief
self.beliefs.append({})
total = {}
for i in range(0, self.rocknum):
self.beliefs[-1][i] = 0
total[i] = 0
for i in node_from_key(event, 'belief'):
state = int(i.attrib['key'])
particles = int(i.attrib['value'])
for j in range(0, self.rocknum):
total[j] += particles
if (state // (2**j)) % 2 == 1:
self.beliefs[-1][j] += particles
for j in range(0, self.rocknum):
self.beliefs[-1][j] /= total[j]
def build_sample_rule(self):
"""
Build a rule for sampling
"""
# enforce probability axioms
t = z3.Real('t_sample')
self.thresholds[0].append(t)
self.solver.add(0.0 <= t)
self.solver.add(t <= 1.0)
# hard constraint, they must be be specified by hand in this version
# e.g: x_1 >= 0.9
#self.solver.add(t > 0.6)
# build soft clauses
for i in range(0, len(self.beliefs)):
bel = self.beliefs[i]
act = self.actions[i]
pos = self.positions[i]
run = self.runs[i]
step = self.steps[i]
collected = self.collected[i]
# generate boolean var for soft constraints
soft = z3.Bool('b_sample_{}'.format(i))
self.soft_constr[0].append(DummyVar(soft, 0, run, step))
# add the rule
subrules = []
for r in self.rocks:
sub = z3.And(
pos[0] == r.x,
pos[1] == r.y,
collected[r.num] == 0,
bel[r.num] >= t
)
subrules.append(sub)
formula = z3.Or(subrules)
if act != 'sample':
formula = z3.Not(formula)
self.solver.add(z3.Or(soft, formula))
# solve MAX-SMT problem
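        # (Added note) The loop below binary-searches for the smallest number
        # of soft clauses that may be violated: z3.PbLe bounds how many dummy
        # literals can be true, and the lowest threshold that is still sat is
        # kept together with its model.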
low_threshold = 0
high_threshold = len(self.soft_constr[0])
final_threshold = -1
best_model = []
while low_threshold <= high_threshold:
self.solver.push()
threshold = (low_threshold + high_threshold) // 2
#Pble pseudo boolean less equal
self.solver.add(z3.PbLe([(soft.literal, 1)
for soft in self.soft_constr[0]], threshold))
result = self.solver.check()
if result == z3.sat:
final_threshold = threshold
best_model = self.solver.model()
high_threshold = threshold - 1
else:
low_threshold = threshold + 1
self.solver.pop()
# build tight bounds
model = best_model
# fix dummy variables
for soft in self.soft_constr[0]:
if model[soft.literal] == True:
self.solver.add(soft.literal)
elif model[soft.literal] == False:
self.solver.add(z3.Not(soft.literal))
self.solver.maximize(t)
# check if SAT or UNSAT
result = self.solver.check()
if result != z3.sat:
print("unsatisfiable")
return
model = self.solver.model()
# generate 1000 random points inside the rule
rule_points = [[to_real(model[t])]]
print('sample if in position and confidence >= {}'.format(to_real(model[t])))
self.sample_confidence = to_real(model[t])
print('fail to satisfy {} out of {} steps'.format(final_threshold, len(self.steps)))
## Hellinger distance of unsatisfiable steps
failed_rules = []
Hellinger_min = []
for num, soft in enumerate(self.soft_constr[0]):
if model[soft.literal] == False or self.actions[num] != 'sample' :
continue
failed_rules.append(num)
pos = self.positions[num]
rock = self.rock_number(pos[0], pos[1])
P = [self.beliefs[num][rock]]
hel_dst = [Hellinger_distance(P, Q) for Q in rule_points]
Hellinger_min.append(min(hel_dst))
# print unsatisfiable steps in decreasing order of hellinger distance
print('Unsatisfiable steps:')
for x, soft, hel in [[x, self.soft_constr[0][x], h] for h, x in sorted(zip(Hellinger_min, failed_rules), key=lambda pair: pair[0], reverse = True)]:
if hel > self.threshold:
print('ANOMALY: ', end='')
pos = self.positions[x]
rock = self.rock_number(pos[0], pos[1])
print('run {} step {}: action {} with belief of valuable rock = {:.3f} --- Hellinger = {}'.format(
soft.run, soft.step, self.actions[x],
self.beliefs[x][rock],
hel)
)
## Hellinger distance of unsatisfiable steps
for num, soft in enumerate(self.soft_constr[0]):
if model[soft.literal] == False or self.actions[num] == 'sample' :
continue
pos = self.positions[num]
rock = self.rock_number(pos[0], pos[1])
print('run {} step {}: action {} with belief of valuable rock = {:.3f}'.format(soft.run, soft.step, self.actions[num], self.beliefs[num][rock]))
def build_check_rule(self):
"""
Build a rule for check 1..n
"""
u1 = z3.Real('u1_sample')
self.thresholds[1].append(u1)
self.solver.add(0.0 <= u1)
self.solver.add(u1 <= 1.0)
v1 = z3.Real('v1_sample')
self.thresholds[1].append(v1)
self.solver.add(0.0 <= v1)
self.solver.add(v1 <= 1.0)
m1 = z3.Real('m1_sample')
self.thresholds[1].append(m1)
self.solver.add(m1 >= 0.0)
n1 = z3.Real('n1_sample')
self.thresholds[1].append(n1)
self.solver.add(n1 >= 0.0)
self.solver.add(v1 < u1)
self.solver.add(n1 <= m1)
#u2 = z3.Real('u2_sample')
#self.thresholds[1].append(u2)
#self.solver.add(0.0 <= u2)
#self.solver.add(u2 <= 1.0)
#v2 = z3.Real('v2_sample')
#self.thresholds[1].append(v2)
#self.solver.add(0.0 <= v2)
#self.solver.add(v2 <= 1.0)
#m2 = z3.Real('m2_sample')
#self.thresholds[1].append(m2)
#self.solver.add(m2 >= 0.0)
#n2 = z3.Real('n2_sample')
#self.thresholds[1].append(n2)
#self.solver.add(n2 >= 0.0)
#self.solver.add(v2 < u2)
#self.solver.add(n2 + 1 <= m2)
#self.solver.add(m1 <= n2)
#self.solver.add(m2 <= 8)
## hard constraint, they must be be specified by hand in this version
## e.g: x_1 >= 0.9
## build soft clauses
for i in range(0, len(self.beliefs)):
bel = self.beliefs[i]
act = self.actions[i]
pos = self.positions[i]
run = self.runs[i]
step = self.steps[i]
collected = self.collected[i]
# generate boolean var for soft constraints
soft = z3.Bool('b_check_{}'.format(i))
self.soft_constr[1].append(DummyVar(soft, 1, run, step))
if act == 'north' or act == 'south' or act == 'east' or act == 'west' or act == 'sample':
self.solver.add(z3.Or(soft, True))
continue
c = int(act.split()[1])
r = self.rocks[c]
# add the rule
formula = z3.Or(
z3.And(
euclidean_distance(pos[0], pos[1], r.x, r.y) >= n1,
euclidean_distance(pos[0], pos[1], r.x, r.y) <= m1,
bel[c] <= u1,
bel[c] >= v1
),
#z3.And(
# euclidean_distance(pos[0], pos[1], r.x, r.y) >= n2,
# euclidean_distance(pos[0], pos[1], r.x, r.y) <= m2,
# bel[c] <= u2,
# bel[c] >= v2
# )
)
self.solver.add(z3.Or(soft, formula))
# solve MAX-SMT problem
low_threshold = 0
high_threshold = len(self.soft_constr[1])
final_threshold = -1
best_model = []
while low_threshold <= high_threshold:
self.solver.push()
threshold = (low_threshold + high_threshold) // 2
#Pble pseudo boolean less equal
self.solver.add(z3.PbLe([(soft.literal, 1)
for soft in self.soft_constr[1]], threshold))
result = self.solver.check()
if result == z3.sat:
final_threshold = threshold
best_model = self.solver.model()
high_threshold = threshold - 1
else:
low_threshold = threshold + 1
self.solver.pop()
## build tight bounds
model = best_model
# fix dummy variables
for soft in self.soft_constr[1]:
if model[soft.literal] == True:
self.solver.add(soft.literal)
elif model[soft.literal] == False:
self.solver.add(z3.Not(soft.literal))
self.solver.minimize(z3.Sum(u1, m1, -v1, -n1))
#self.solver.minimize(z3.Sum(u1, m1, -v1, -n1, u2, m2, -v2, -n2))
# check if SAT or UNSAT
result = self.solver.check()
if result != z3.sat:
print("unsatisfiable")
return
model = self.solver.model()
## generate 1000 random points inside the rule
#rule_points = [[to_real(model[t])]]
print('check when: distance [{:.3f}, {:.3f}] and belief of valuable rock in [{:.3f}, {:.3f}]'.format(to_real(model[n1]), to_real(model[m1]), to_real(model[v1]), to_real(model[u1])))
#print('check when: distance [{:.3f}, {:.3f}] and belief of valuable rock in [{:.3f}, {:.3f}] OR distance [{:.3f}, {:.3f}] and belief of valuable rock in [{:.3f}, {:.3f}]'.format(to_real(model[n1]), to_real(model[m1]), to_real(model[v1]), to_real(model[u1]), to_real(model[n2]), to_real(model[m2]), to_real(model[v2]), to_real(model[u2])))
print('fail to satisfy {} out of {} steps'.format(final_threshold, len(self.steps)))
### Hellinger distance of unsatisfiable steps
#failed_rules = []
#Hellinger_min = []
#for num, soft in enumerate(self.soft_constr[1]):
# if model[soft.literal] == False or self.actions[num] != 'sample' :
# continue
# failed_rules.append(num)
# pos = self.positions[num]
# rock = self.rock_number(pos[0], pos[1])
# P = [self.beliefs[num][rock]]
# hel_dst = [Hellinger_distance(P, Q) for Q in rule_points]
# Hellinger_min.append(min(hel_dst))
## print unsatisfiable steps in decreasing order of hellinger distance
#print('Unsatisfiable steps:')
#for x, soft, hel in [[x, self.soft_constr[1][x], h] for h, x in sorted(zip(Hellinger_min, failed_rules), key=lambda pair: pair[0], reverse = True)]:
# if hel > self.threshold:
# print('ANOMALY: ', end='')
# pos = self.positions[x]
# rock = self.rock_number(pos[0], pos[1])
# print('run {} step {}: action {} with belief of valuable rock = {:.3f} --- Hellinger = {}'.format(
# soft.run, soft.step, self.actions[x],
# self.beliefs[x][rock],
# hel)
# )
### Hellinger distance of unsatisfiable steps
#for num, soft in enumerate(self.soft_constr[1]):
# if model[soft.literal] == False or self.actions[num] == 'sample' :
# continue
# pos = self.positions[num]
# rock = self.rock_number(pos[0], pos[1])
# print('run {} step {}: do not sample with belief of valuable rock = {:.3f}'.format(soft.run, soft.step, self.beliefs[num][rock]))
def build_north_south_rule(self):
"""
Build a rule for north or south
"""
c = z3.Real('valuable_confidence')
self.thresholds[2].append(c)
self.solver.add(c >= 0.0)
self.solver.add(c <= 1.0)
self.solver.add(c >= self.sample_confidence)
## build soft clauses
for i in range(0, len(self.beliefs)):
bel = self.beliefs[i]
act = self.actions[i]
pos = self.positions[i]
run = self.runs[i]
step = self.steps[i]
collected = self.collected[i]
# generate boolean var for soft constraints
soft = z3.Bool('b_north_south_{}'.format(i))
self.soft_constr[2].append(DummyVar(soft, 2, run, step))
if act != 'north' and act != 'south':
self.solver.add(z3.Or(soft, True))
continue
if act == 'north':
subrules = []
for r in self.rocks:
sub = z3.And(
pos[1] < r.y,
collected[r.num] == 0,
bel[r.num] >= c
)
subrules.append(sub)
formula = z3.Or(subrules)
self.solver.add(z3.Or(soft, formula))
else:
subrules = []
for r in self.rocks:
sub = z3.And(
pos[1] > r.y,
collected[r.num] == 0,
bel[r.num] >= c
)
subrules.append(sub)
formula = z3.Or(subrules)
self.solver.add(z3.Or(soft, formula))
# solve MAX-SMT problem
low_threshold = 0
high_threshold = len(self.soft_constr[2])
final_threshold = -1
best_model = []
while low_threshold <= high_threshold:
self.solver.push()
threshold = (low_threshold + high_threshold) // 2
#Pble pseudo boolean less equal
            self.solver.add(z3.PbLe([(soft.literal, 1)
                for soft in self.soft_constr[2]], threshold))
result = self.solver.check()
if result == z3.sat:
final_threshold = threshold
best_model = self.solver.model()
high_threshold = threshold - 1
else:
low_threshold = threshold + 1
self.solver.pop()
## build tight bounds
model = best_model
# fix dummy variables
for soft in self.soft_constr[2]:
if model[soft.literal] == True:
self.solver.add(soft.literal)
elif model[soft.literal] == False:
self.solver.add(z3.Not(soft.literal))
#self.solver.minimize(z3.Sum(u1, m1, -v1, -n1, u2, m2, -v2, -n2))
# check if SAT or UNSAT
result = self.solver.check()
if result != z3.sat:
print("unsatisfiable")
return
model = self.solver.model()
## generate 1000 random points inside the rule
#rule_points = [[to_real(model[t])]]
print('move north or south if confidence of treasure is >={:.3f}'.format(to_real(model[c])))
print('fail to satisfy {} out of {} steps'.format(final_threshold, len(self.steps)))
### Hellinger distance of unsatisfiable steps
#failed_rules = []
#Hellinger_min = []
#for num, soft in enumerate(self.soft_constr[1]):
# if model[soft.literal] == False or self.actions[num] != 'sample' :
# continue
# failed_rules.append(num)
# pos = self.positions[num]
# rock = self.rock_number(pos[0], pos[1])
# P = [self.beliefs[num][rock]]
# hel_dst = [Hellinger_distance(P, Q) for Q in rule_points]
# Hellinger_min.append(min(hel_dst))
## print unsatisfiable steps in decreasing order of hellinger distance
#print('Unsatisfiable steps:')
#for x, soft, hel in [[x, self.soft_constr[1][x], h] for h, x in sorted(zip(Hellinger_min, failed_rules), key=lambda pair: pair[0], reverse = True)]:
# if hel > self.threshold:
# print('ANOMALY: ', end='')
# pos = self.positions[x]
# rock = self.rock_number(pos[0], pos[1])
# print('run {} step {}: action {} with belief of valuable rock = {:.3f} --- Hellinger = {}'.format(
# soft.run, soft.step, self.actions[x],
# self.beliefs[x][rock],
# hel)
# )
### Hellinger distance of unsatisfiable steps
#for num, soft in enumerate(self.soft_constr[1]):
# if model[soft.literal] == False or self.actions[num] == 'sample' :
# continue
# pos = self.positions[num]
# rock = self.rock_number(pos[0], pos[1])
# print('run {} step {}: do not sample with belief of valuable rock = {:.3f}'.format(soft.run, soft.step, self.beliefs[num][rock]))
def synthetize_rules(self):
"""
        Synthesize each rule
"""
# sample rule
self.solver.push()
self.build_sample_rule()
self.solver.pop()
self.solver.push()
self.build_check_rule()
self.solver.pop()
self.solver.push()
self.build_north_south_rule()
self.solver.pop()
########
# MAIN #
########
if __name__ == "__main__":
if len(sys.argv) != 2:
print ('usage: xpomcp <log.xes>')
exit()
xes_log = str(sys.argv[1])
rs = RuleSynth(
xes_log=xes_log,
threshold=0.1
)
rs.synthetize_rules()
|
[
"xml.etree.ElementTree.parse",
"z3.PbLe",
"math.sqrt",
"z3.And",
"z3.Optimize",
"z3.Real",
"z3.Sum",
"z3.Not",
"z3.Or"
] |
[((730, 745), 'math.sqrt', 'math.sqrt', (['dist'], {}), '(dist)\n', (739, 745), False, 'import math, random\n'), ((758, 770), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (767, 770), False, 'import math, random\n'), ((920, 962), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (929, 962), False, 'import math, random\n'), ((1960, 1977), 'xml.etree.ElementTree.parse', 'ET.parse', (['xes_log'], {}), '(xes_log)\n', (1968, 1977), True, 'import xml.etree.ElementTree as ET\n'), ((2360, 2373), 'z3.Optimize', 'z3.Optimize', ([], {}), '()\n', (2371, 2373), False, 'import z3\n'), ((5326, 5345), 'z3.Real', 'z3.Real', (['"""t_sample"""'], {}), "('t_sample')\n", (5333, 5345), False, 'import z3\n'), ((10020, 10040), 'z3.Real', 'z3.Real', (['"""u1_sample"""'], {}), "('u1_sample')\n", (10027, 10040), False, 'import z3\n'), ((10162, 10182), 'z3.Real', 'z3.Real', (['"""v1_sample"""'], {}), "('v1_sample')\n", (10169, 10182), False, 'import z3\n'), ((10304, 10324), 'z3.Real', 'z3.Real', (['"""m1_sample"""'], {}), "('m1_sample')\n", (10311, 10324), False, 'import z3\n'), ((10411, 10431), 'z3.Real', 'z3.Real', (['"""n1_sample"""'], {}), "('n1_sample')\n", (10418, 10431), False, 'import z3\n'), ((16728, 16758), 'z3.Real', 'z3.Real', (['"""valuable_confidence"""'], {}), "('valuable_confidence')\n", (16735, 16758), False, 'import z3\n'), ((6422, 6437), 'z3.Or', 'z3.Or', (['subrules'], {}), '(subrules)\n', (6427, 6437), False, 'import z3\n'), ((13873, 13897), 'z3.Sum', 'z3.Sum', (['u1', 'm1', '(-v1)', '(-n1)'], {}), '(u1, m1, -v1, -n1)\n', (13879, 13897), False, 'import z3\n'), ((684, 696), 'math.sqrt', 'math.sqrt', (['p'], {}), '(p)\n', (693, 696), False, 'import math, random\n'), ((699, 711), 'math.sqrt', 'math.sqrt', (['q'], {}), '(q)\n', (708, 711), False, 'import math, random\n'), ((6163, 6239), 'z3.And', 'z3.And', (['(pos[0] == r.x)', '(pos[1] == r.y)', '(collected[r.num] == 0)', '(bel[r.num] >= t)'], {}), '(pos[0] == r.x, pos[1] == r.y, collected[r.num] == 0, bel[r.num] >= t)\n', (6169, 6239), False, 'import z3\n'), ((6497, 6512), 'z3.Not', 'z3.Not', (['formula'], {}), '(formula)\n', (6503, 6512), False, 'import z3\n'), ((6543, 6563), 'z3.Or', 'z3.Or', (['soft', 'formula'], {}), '(soft, formula)\n', (6548, 6563), False, 'import z3\n'), ((6942, 7013), 'z3.PbLe', 'z3.PbLe', (['[(soft.literal, 1) for soft in self.soft_constr[0]]', 'threshold'], {}), '([(soft.literal, 1) for soft in self.soft_constr[0]], threshold)\n', (6949, 7013), False, 'import z3\n'), ((12724, 12744), 'z3.Or', 'z3.Or', (['soft', 'formula'], {}), '(soft, formula)\n', (12729, 12744), False, 'import z3\n'), ((13123, 13194), 'z3.PbLe', 'z3.PbLe', (['[(soft.literal, 1) for soft in self.soft_constr[1]]', 'threshold'], {}), '([(soft.literal, 1) for soft in self.soft_constr[1]], threshold)\n', (13130, 13194), False, 'import z3\n'), ((17882, 17897), 'z3.Or', 'z3.Or', (['subrules'], {}), '(subrules)\n', (17887, 17897), False, 'import z3\n'), ((18305, 18320), 'z3.Or', 'z3.Or', (['subrules'], {}), '(subrules)\n', (18310, 18320), False, 'import z3\n'), ((18752, 18823), 'z3.PbLe', 'z3.PbLe', (['[(soft.literal, 1) for soft in self.soft_constr[1]]', 'threshold'], {}), '([(soft.literal, 1) for soft in self.soft_constr[1]], threshold)\n', (18759, 18823), False, 'import z3\n'), ((11937, 11954), 'z3.Or', 'z3.Or', (['soft', '(True)'], {}), '(soft, True)\n', (11942, 11954), False, 'import z3\n'), ((17471, 17488), 'z3.Or', 'z3.Or', (['soft', '(True)'], {}), '(soft, True)\n', (17476, 17488), False, 
'import z3\n'), ((17640, 17700), 'z3.And', 'z3.And', (['(pos[1] < r.y)', '(collected[r.num] == 0)', '(bel[r.num] >= c)'], {}), '(pos[1] < r.y, collected[r.num] == 0, bel[r.num] >= c)\n', (17646, 17700), False, 'import z3\n'), ((17930, 17950), 'z3.Or', 'z3.Or', (['soft', 'formula'], {}), '(soft, formula)\n', (17935, 17950), False, 'import z3\n'), ((18063, 18123), 'z3.And', 'z3.And', (['(pos[1] > r.y)', '(collected[r.num] == 0)', '(bel[r.num] >= c)'], {}), '(pos[1] > r.y, collected[r.num] == 0, bel[r.num] >= c)\n', (18069, 18123), False, 'import z3\n'), ((18353, 18373), 'z3.Or', 'z3.Or', (['soft', 'formula'], {}), '(soft, formula)\n', (18358, 18373), False, 'import z3\n'), ((7639, 7659), 'z3.Not', 'z3.Not', (['soft.literal'], {}), '(soft.literal)\n', (7645, 7659), False, 'import z3\n'), ((13821, 13841), 'z3.Not', 'z3.Not', (['soft.literal'], {}), '(soft.literal)\n', (13827, 13841), False, 'import z3\n'), ((19450, 19470), 'z3.Not', 'z3.Not', (['soft.literal'], {}), '(soft.literal)\n', (19456, 19470), False, 'import z3\n')]
|
import keyboard
# keyboard.press_and_release('shift+s, space')
# keyboard.write('The quick brown fox jumps over the lazy dog.')
# keyboard.add_hotkey('ctrl+shift+a', print, args=('triggered', 'hotkey'))
# # Press PAGE UP then PAGE DOWN to type "foobar".
# keyboard.add_hotkey('4', lambda: keyboard.write('#4'))
# # Blocks until you press esc.
# keyboard.wait('esc')
# # Record events until 'esc' is pressed.
# recorded = keyboard.record(until='esc')
# # Then replay back at three times the speed.
# keyboard.play(recorded, speed_factor=3)
# # Type @@ then press space to replace with abbreviation.
# keyboard.add_abbreviation('@@', '<EMAIL>')
# # Block forever, like `while True`.
# keyboard.wait()
keyboard.add_hotkey('1', lambda: print('#1'))
keyboard.add_hotkey('2', lambda: print('#2'))
keyboard.add_hotkey('3', lambda: print('#3'))
keyboard.add_hotkey('4', lambda: print('#4'))
# # Blocks until you press esc.
keyboard.wait('esc')
print('start')
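# Added note: keyboard.wait('esc') blocks this main thread while the hotkey
# callbacks registered above fire from the library's listener thread, so
# 'start' is only printed after Esc is pressed; keyboard.unhook_all() can then
# remove every registered handler if needed.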
|
[
"keyboard.wait"
] |
[((925, 945), 'keyboard.wait', 'keyboard.wait', (['"""esc"""'], {}), "('esc')\n", (938, 945), False, 'import keyboard\n')]
|
from pathlib import Path
from typing import Any, Dict, Optional, Union
from kedro.pipeline import Pipeline
from kedro_mlflow.pipeline.pipeline_ml import PipelineML
def pipeline_ml(
training: Pipeline,
inference: Pipeline,
    input_name: Optional[str] = None,
conda_env: Optional[Union[str, Path, Dict[str, Any]]] = None,
model_name: Optional[str] = "model",
) -> PipelineML:
"""[summary]
Args:
training (Pipeline): The `Pipeline` object that creates
all mlflow artifacts for prediction (the model,
but also encoders, binarizers, tokenizers...).
These artifacts must be persisted in the catalog.yml.
inference (Pipeline): A `Pipeline` object which will be
stored in mlflow and use the output(s)
of the training pipeline (namely, the model)
to predict the outcome.
input_name (str, optional): The name of the dataset in
the catalog.yml which the model's user must provide
for prediction (i.e. the data). Defaults to None.
conda_env (Union[str, Path, Dict[str, Any]], optional):
The minimal conda environment necessary for the
inference `Pipeline`. It can be either :
- a path to a "requirements.txt": In this case
the packages are parsed and a conda env with
your current python_version and these
dependencies is returned.
- a path to an "environment.yml" : the file is
uploaded "as is".
- a Dict : used as the environment
- None: a base conda environment with your
current python version and your project
version at training time.
Defaults to None.
model_name (Union[str, None], optional): The name of
the folder where the model will be stored in
remote mlflow. Defaults to "model".
Returns:
PipelineML: A `PipelineML` which is automatically
discovered by the `MlflowPipelineHook` and
contains all the information for logging the
inference pipeline as a Mlflow Model.
"""
pipeline = PipelineML(
nodes=training.nodes,
inference=inference,
input_name=input_name,
conda_env=conda_env,
model_name=model_name,
)
return pipeline
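# --- Added usage sketch (not part of the original module) ---
# A typical call wires a training and an inference ``Pipeline`` together; the
# node functions and dataset names below are hypothetical.
def _example_pipeline_ml() -> PipelineML:
    from kedro.pipeline import node

    def fit_model(training_data):
        ...

    def predict(model, instances):
        ...

    training = Pipeline(
        [node(fit_model, inputs="training_data", outputs="model")])
    inference = Pipeline(
        [node(predict, inputs=["model", "instances"], outputs="predictions")])
    return pipeline_ml(training=training, inference=inference,
                       input_name="instances")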
|
[
"kedro_mlflow.pipeline.pipeline_ml.PipelineML"
] |
[((2240, 2364), 'kedro_mlflow.pipeline.pipeline_ml.PipelineML', 'PipelineML', ([], {'nodes': 'training.nodes', 'inference': 'inference', 'input_name': 'input_name', 'conda_env': 'conda_env', 'model_name': 'model_name'}), '(nodes=training.nodes, inference=inference, input_name=input_name,\n conda_env=conda_env, model_name=model_name)\n', (2250, 2364), False, 'from kedro_mlflow.pipeline.pipeline_ml import PipelineML\n')]
|
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
from sys import exit
pygame.init()
screen=pygame.display.set_mode((800,800),0,32)
pygame.display.set_caption("中国AI象棋")
screen.fill((255,255,255))
screen.set_clip(0, 0, 600, 650)
# vertical lines
pygame.draw.line(screen, (222, 125, 44), (75, 0), (75, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (150, 0), (150, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (225, 0), (225, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (300, 0), (300, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (375, 0), (375, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (450, 0), (450, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (525, 0), (525, 280), 1)
pygame.draw.line(screen, (222, 125, 44), (75, 370), (75, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (150, 370), (150, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (225, 370), (225, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (300, 370), (300, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (375, 370), (375, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (450, 370), (450, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (525, 370), (525, 650), 1)
# horizontal lines
pygame.draw.line(screen, (0, 0, 255), (0, 70), (600, 70), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 140), (600, 140), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 210), (600, 210), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 280), (600, 280), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 370), (600, 370), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 440), (600, 440), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 510), (600, 510), 1)
pygame.draw.line(screen, (0, 0, 255), (0, 580), (600, 580), 1)
# diagonal lines
pygame.draw.line(screen, (222, 125, 44), (225, 0), (375, 140), 1)
pygame.draw.line(screen, (222, 125, 44), (375, 0), (225, 140), 1)
pygame.draw.line(screen, (222, 125, 44), (225, 510), (375, 650), 1)
pygame.draw.line(screen, (222, 125, 44), (375, 510), (225, 650), 1)
# border
pygame.draw.rect(screen,(0,0,0),((0,0),(600,650)),3)
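# --- Added illustrative alternative (not part of the original script) ---
# The grid above follows a regular 75/70-pixel spacing, so the same lines
# could be drawn with two loops; the helper is defined here but not called.
def draw_grid_with_loops(surface):
    for col in range(1, 8):                                # vertical lines, two segments each
        for y0, y1 in ((0, 280), (370, 650)):
            pygame.draw.line(surface, (222, 125, 44), (75*col, y0), (75*col, y1), 1)
    for row in (70, 140, 210, 280, 370, 440, 510, 580):    # horizontal lines
        pygame.draw.line(surface, (0, 0, 255), (0, row), (600, row), 1)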
while True:
for event in pygame.event.get():
if event.type==QUIT:
pygame.display.quit()
exit()
pygame.display.update()
|
[
"pygame.draw.line",
"pygame.draw.rect",
"pygame.display.set_mode",
"pygame.event.get",
"pygame.init",
"pygame.display.update",
"pygame.display.quit",
"pygame.display.set_caption",
"sys.exit"
] |
[((85, 98), 'pygame.init', 'pygame.init', ([], {}), '()\n', (96, 98), False, 'import pygame\n'), ((106, 148), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 800)', '(0)', '(32)'], {}), '((800, 800), 0, 32)\n', (129, 148), False, 'import pygame\n'), ((146, 182), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""中国AI象棋"""'], {}), "('中国AI象棋')\n", (172, 182), False, 'import pygame\n'), ((246, 309), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(75, 0)', '(75, 280)', '(1)'], {}), '(screen, (222, 125, 44), (75, 0), (75, 280), 1)\n', (262, 309), False, 'import pygame\n'), ((310, 375), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(150, 0)', '(150, 280)', '(1)'], {}), '(screen, (222, 125, 44), (150, 0), (150, 280), 1)\n', (326, 375), False, 'import pygame\n'), ((376, 441), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(225, 0)', '(225, 280)', '(1)'], {}), '(screen, (222, 125, 44), (225, 0), (225, 280), 1)\n', (392, 441), False, 'import pygame\n'), ((442, 507), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(300, 0)', '(300, 280)', '(1)'], {}), '(screen, (222, 125, 44), (300, 0), (300, 280), 1)\n', (458, 507), False, 'import pygame\n'), ((508, 573), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(375, 0)', '(375, 280)', '(1)'], {}), '(screen, (222, 125, 44), (375, 0), (375, 280), 1)\n', (524, 573), False, 'import pygame\n'), ((574, 639), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(450, 0)', '(450, 280)', '(1)'], {}), '(screen, (222, 125, 44), (450, 0), (450, 280), 1)\n', (590, 639), False, 'import pygame\n'), ((640, 705), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(525, 0)', '(525, 280)', '(1)'], {}), '(screen, (222, 125, 44), (525, 0), (525, 280), 1)\n', (656, 705), False, 'import pygame\n'), ((707, 772), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(75, 370)', '(75, 650)', '(1)'], {}), '(screen, (222, 125, 44), (75, 370), (75, 650), 1)\n', (723, 772), False, 'import pygame\n'), ((773, 840), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(150, 370)', '(150, 650)', '(1)'], {}), '(screen, (222, 125, 44), (150, 370), (150, 650), 1)\n', (789, 840), False, 'import pygame\n'), ((841, 908), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(225, 370)', '(225, 650)', '(1)'], {}), '(screen, (222, 125, 44), (225, 370), (225, 650), 1)\n', (857, 908), False, 'import pygame\n'), ((909, 976), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(300, 370)', '(300, 650)', '(1)'], {}), '(screen, (222, 125, 44), (300, 370), (300, 650), 1)\n', (925, 976), False, 'import pygame\n'), ((977, 1044), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(375, 370)', '(375, 650)', '(1)'], {}), '(screen, (222, 125, 44), (375, 370), (375, 650), 1)\n', (993, 1044), False, 'import pygame\n'), ((1045, 1112), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(450, 370)', '(450, 650)', '(1)'], {}), '(screen, (222, 125, 44), (450, 370), (450, 650), 1)\n', (1061, 1112), False, 'import pygame\n'), ((1113, 1180), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(525, 370)', '(525, 650)', '(1)'], {}), '(screen, (222, 125, 44), (525, 370), (525, 650), 1)\n', (1129, 1180), False, 'import pygame\n'), ((1185, 1245), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', 
'(0, 70)', '(600, 70)', '(1)'], {}), '(screen, (0, 0, 255), (0, 70), (600, 70), 1)\n', (1201, 1245), False, 'import pygame\n'), ((1246, 1308), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 140)', '(600, 140)', '(1)'], {}), '(screen, (0, 0, 255), (0, 140), (600, 140), 1)\n', (1262, 1308), False, 'import pygame\n'), ((1309, 1371), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 210)', '(600, 210)', '(1)'], {}), '(screen, (0, 0, 255), (0, 210), (600, 210), 1)\n', (1325, 1371), False, 'import pygame\n'), ((1372, 1434), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 280)', '(600, 280)', '(1)'], {}), '(screen, (0, 0, 255), (0, 280), (600, 280), 1)\n', (1388, 1434), False, 'import pygame\n'), ((1435, 1497), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 370)', '(600, 370)', '(1)'], {}), '(screen, (0, 0, 255), (0, 370), (600, 370), 1)\n', (1451, 1497), False, 'import pygame\n'), ((1498, 1560), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 440)', '(600, 440)', '(1)'], {}), '(screen, (0, 0, 255), (0, 440), (600, 440), 1)\n', (1514, 1560), False, 'import pygame\n'), ((1561, 1623), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 510)', '(600, 510)', '(1)'], {}), '(screen, (0, 0, 255), (0, 510), (600, 510), 1)\n', (1577, 1623), False, 'import pygame\n'), ((1624, 1686), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 255)', '(0, 580)', '(600, 580)', '(1)'], {}), '(screen, (0, 0, 255), (0, 580), (600, 580), 1)\n', (1640, 1686), False, 'import pygame\n'), ((1691, 1756), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(225, 0)', '(375, 140)', '(1)'], {}), '(screen, (222, 125, 44), (225, 0), (375, 140), 1)\n', (1707, 1756), False, 'import pygame\n'), ((1757, 1822), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(375, 0)', '(225, 140)', '(1)'], {}), '(screen, (222, 125, 44), (375, 0), (225, 140), 1)\n', (1773, 1822), False, 'import pygame\n'), ((1824, 1891), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(225, 510)', '(375, 650)', '(1)'], {}), '(screen, (222, 125, 44), (225, 510), (375, 650), 1)\n', (1840, 1891), False, 'import pygame\n'), ((1892, 1959), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(222, 125, 44)', '(375, 510)', '(225, 650)', '(1)'], {}), '(screen, (222, 125, 44), (375, 510), (225, 650), 1)\n', (1908, 1959), False, 'import pygame\n'), ((1964, 2024), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(0, 0, 0)', '((0, 0), (600, 650))', '(3)'], {}), '(screen, (0, 0, 0), ((0, 0), (600, 650)), 3)\n', (1980, 2024), False, 'import pygame\n'), ((2043, 2061), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2059, 2061), False, 'import pygame\n'), ((2122, 2145), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2143, 2145), False, 'import pygame\n'), ((2089, 2110), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (2108, 2110), False, 'import pygame\n'), ((2114, 2120), 'sys.exit', 'exit', ([], {}), '()\n', (2118, 2120), False, 'from sys import exit\n')]
|
import importlib
import logging
from typing import Iterable, Any # noqa: F401
from pyhocon import ConfigTree # noqa: F401
from databuilder.extractor.base_extractor import Extractor
LOGGER = logging.getLogger(__name__)
class DBAPIExtractor(Extractor):
"""
Generic DB API extractor.
"""
CONNECTION_CONFIG_KEY = 'connection'
SQL_CONFIG_KEY = 'sql'
def init(self, conf):
# type: (ConfigTree) -> None
"""
Receives a {Connection} object and {sql} to execute.
        An optional model class can be passed, in which case each sql result
        row is converted to an instance of that class before being returned
        to the calling function.
:param conf:
:return:
"""
self.conf = conf
self.connection = conf.get(DBAPIExtractor.CONNECTION_CONFIG_KEY) # type: Any
self.cursor = self.connection.cursor()
self.sql = conf.get(DBAPIExtractor.SQL_CONFIG_KEY)
model_class = conf.get('model_class', None)
if model_class:
module_name, class_name = model_class.rsplit(".", 1)
mod = importlib.import_module(module_name)
self.model_class = getattr(mod, class_name)
self._iter = iter(self._execute_query())
def _execute_query(self):
# type: () -> Iterable[Any]
"""
Use cursor to execute the {sql}
:return:
"""
self.cursor.execute(self.sql)
return self.cursor.fetchall()
def extract(self):
# type: () -> Any
"""
Fetch one sql result row, convert to {model_class} if specified before
returning.
:return:
"""
try:
result = next(self._iter)
except StopIteration:
return None
if hasattr(self, 'model_class'):
            obj = self.model_class(*result)
return obj
else:
return result
def close(self):
# type: () -> None
"""
close cursor and connection handlers
:return:
"""
try:
self.cursor.close()
self.connection.close()
except Exception:
LOGGER.exception("Exception encountered while closing up connection handler!")
def get_scope(self):
# type: () -> str
return 'extractor.dbapi'
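# ---------------------------------------------------------------------------
# Editor's illustration, not part of the extracted module: a minimal usage
# sketch for the extractor above. It assumes pyhocon's ConfigFactory and uses
# a throwaway in-memory sqlite3 database; any DB API 2.0 connection would do.
if __name__ == '__main__':
    import sqlite3
    from pyhocon import ConfigFactory
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE t (id INTEGER, name TEXT)')
    conn.execute("INSERT INTO t VALUES (1, 'alpha')")
    extractor = DBAPIExtractor()
    extractor.init(ConfigFactory.from_dict({
        DBAPIExtractor.CONNECTION_CONFIG_KEY: conn,
        DBAPIExtractor.SQL_CONFIG_KEY: 'SELECT id, name FROM t',
    }))
    print(extractor.extract())  # -> (1, 'alpha')
    extractor.close()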
|
[
"importlib.import_module",
"logging.getLogger"
] |
[((196, 223), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (213, 223), False, 'import logging\n'), ((1097, 1133), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1120, 1133), False, 'import importlib\n')]
|
# -*- coding: utf-8 -*-
"""The check functions."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import operator
from distutils.version import LooseVersion
import os.path as op
import numpy as np
from ._logging import warn
def _ensure_int(x, name='unknown', must_be='an int'):
"""Ensure a variable is an integer."""
# This is preferred over numbers.Integral, see:
# https://github.com/scipy/scipy/pull/7351#issuecomment-299713159
try:
x = int(operator.index(x))
except TypeError:
raise TypeError('%s must be %s, got %s' % (name, must_be, type(x)))
return x
def check_fname(fname, filetype, endings, endings_err=()):
"""Enforce MNE filename conventions.
Parameters
----------
fname : str
Name of the file.
filetype : str
Type of file. e.g., ICA, Epochs etc.
endings : tuple
Acceptable endings for the filename.
endings_err : tuple
Obligatory possible endings for the filename.
"""
if len(endings_err) > 0 and not fname.endswith(endings_err):
print_endings = ' or '.join([', '.join(endings_err[:-1]),
endings_err[-1]])
raise IOError('The filename (%s) for file type %s must end with %s'
% (fname, filetype, print_endings))
print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
if not fname.endswith(endings):
warn('This filename (%s) does not conform to MNE naming conventions. '
'All %s files should end with %s'
% (fname, filetype, print_endings))
def check_version(library, min_version):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version:
this_version = LooseVersion(library.__version__)
if this_version < min_version:
ok = False
return ok
def _check_mayavi_version(min_version='4.3.0'):
"""Check mayavi version."""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
def _check_pyface_backend():
"""Check the currently selected Pyface backend.
Returns
-------
backend : str
Name of the backend.
result : 0 | 1 | 2
0: the backend has been tested and works.
1: the backend has not been tested.
        2: the backend could not be determined (traits/pyface not importable).
Notes
-----
See also http://docs.enthought.com/pyface/.
"""
try:
from traits.trait_base import ETSConfig
except ImportError:
return None, 2
backend = ETSConfig.toolkit
if backend == 'qt4':
status = 0
else:
status = 1
return backend, status
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance.
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def _check_event_id(event_id, events):
"""Check event_id and convert to default format."""
# check out event_id dict
if event_id is None: # convert to int to make typing-checks happy
event_id = list(np.unique(events[:, 2]))
if isinstance(event_id, dict):
for key in event_id.keys():
_validate_type(key, str, 'Event names')
event_id = dict((key, _ensure_int(val, 'event_id[%s]' % key))
for key, val in event_id.items())
elif isinstance(event_id, list):
event_id = [_ensure_int(v, 'event_id[%s]' % vi)
for vi, v in enumerate(event_id)]
event_id = dict(zip((str(i) for i in event_id), event_id))
else:
event_id = _ensure_int(event_id, 'event_id')
event_id = {str(event_id): event_id}
return event_id
def _check_fname(fname, overwrite=False, must_exist=False):
"""Check for file existence."""
_validate_type(fname, 'str', 'fname')
from mne.utils import logger
if must_exist and not op.isfile(fname):
raise IOError('File "%s" does not exist' % fname)
if op.isfile(fname):
if not overwrite:
raise IOError('Destination file exists. Please use option '
'"overwrite=True" to force overwriting.')
elif overwrite != 'read':
logger.info('Overwriting existing file.')
def _check_subject(class_subject, input_subject, raise_error=True):
"""Get subject name from class."""
if input_subject is not None:
_validate_type(input_subject, 'str', "subject input")
return input_subject
elif class_subject is not None:
_validate_type(class_subject, 'str',
"Either subject input or class subject attribute")
return class_subject
else:
if raise_error is True:
raise ValueError('Neither subject input nor class subject '
'attribute was a string')
return None
def _check_preload(inst, msg):
"""Ensure data are preloaded."""
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..time_frequency import _BaseTFR
if isinstance(inst, (_BaseTFR, Evoked)):
pass
else:
name = "epochs" if isinstance(inst, BaseEpochs) else 'raw'
if not inst.preload:
raise RuntimeError(
"By default, MNE does not load data into main memory to "
"conserve resources. " + msg + ' requires %s data to be '
'loaded. Use preload=True (or string) in the constructor or '
'%s.load_data().' % (name, name))
def _check_compensation_grade(inst, inst2, name, name2, ch_names=None):
"""Ensure that objects have same compensation_grade."""
from ..io.pick import pick_channels, pick_info
from ..io.compensator import get_current_comp
if None in [inst.info, inst2.info]:
return
if ch_names is None:
grade = inst.compensation_grade
grade2 = inst2.compensation_grade
else:
info = inst.info.copy()
info2 = inst2.info.copy()
# pick channels
for t_info in [info, info2]:
if t_info['comps']:
t_info['comps'] = []
picks = pick_channels(t_info['ch_names'], ch_names)
pick_info(t_info, picks, copy=False)
# get compensation grades
grade = get_current_comp(info)
grade2 = get_current_comp(info2)
# perform check
if grade != grade2:
msg = 'Compensation grade of %s (%d) and %s (%d) don\'t match'
raise RuntimeError(msg % (name, inst.compensation_grade,
name2, inst2.compensation_grade))
def _check_pandas_installed(strict=True):
"""Aux function."""
try:
import pandas
return pandas
except ImportError:
if strict is True:
raise RuntimeError('For this functionality to work, the Pandas '
'library is required.')
else:
return False
def _check_pandas_index_arguments(index, defaults):
"""Check pandas index arguments."""
if not any(isinstance(index, k) for k in (list, tuple)):
index = [index]
invalid_choices = [e for e in index if e not in defaults]
if invalid_choices:
options = [', '.join(e) for e in [invalid_choices, defaults]]
        raise ValueError('[%s] is not a valid option. Valid index '
                         'values are \'None\' or %s' % tuple(options))
def _check_ch_locs(chs):
"""Check if channel locations exist.
Parameters
----------
chs : dict
The channels from info['chs']
"""
locs3d = np.array([ch['loc'][:3] for ch in chs])
return not ((locs3d == 0).all() or
(~np.isfinite(locs3d)).all() or
np.allclose(locs3d, 0.))
def _check_type_picks(picks):
"""Guarantee type integrity of picks."""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
for pick in picks:
_validate_type(pick, 'int', 'Each pick')
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise TypeError(err_msg)
else:
raise TypeError(err_msg)
return picks
def _is_numeric(n):
return isinstance(n, (np.integer, np.floating, int, float))
def _validate_type(item, types=None, item_name=None, type_name=None):
"""Validate that `item` is an instance of `types`.
Parameters
----------
item : obj
The thing to be checked.
types : type | tuple of types | str
The types to be checked against. If str, must be one of 'str', 'int',
'numeric'.
"""
if types == "int":
_ensure_int(item, name=item_name)
return # terminate prematurely
elif types == "str":
types = str
type_name = "str" if type_name is None else type_name
elif types == "numeric":
types = (np.integer, np.floating, int, float)
type_name = "numeric" if type_name is None else type_name
elif types == "info":
from mne.io import Info as types
type_name = "Info" if type_name is None else type_name
item_name = "Info" if item_name is None else item_name
if type_name is None:
iter_types = ([types] if not isinstance(types, (list, tuple))
else types)
type_name = ', '.join(cls.__name__ for cls in iter_types)
if not isinstance(item, types):
raise TypeError('%s must be an instance of %s, got %s instead'
% (item_name, type_name, type(item),))
def _check_if_nan(data, msg=" to be plotted"):
"""Raise if any of the values are NaN."""
if not np.isfinite(data).all():
raise ValueError("Some of the values {} are NaN.".format(msg))
|
[
"operator.index",
"mne.utils.logger.info",
"distutils.version.LooseVersion",
"numpy.allclose",
"numpy.isfinite",
"numpy.random.RandomState",
"os.path.isfile",
"numpy.array",
"numpy.unique"
] |
[((5048, 5064), 'os.path.isfile', 'op.isfile', (['fname'], {}), '(fname)\n', (5057, 5064), True, 'import os.path as op\n'), ((8651, 8690), 'numpy.array', 'np.array', (["[ch['loc'][:3] for ch in chs]"], {}), "([ch['loc'][:3] for ch in chs])\n", (8659, 8690), True, 'import numpy as np\n'), ((3713, 3740), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3734, 3740), True, 'import numpy as np\n'), ((483, 500), 'operator.index', 'operator.index', (['x'], {}), '(x)\n', (497, 500), False, 'import operator\n'), ((2302, 2335), 'distutils.version.LooseVersion', 'LooseVersion', (['library.__version__'], {}), '(library.__version__)\n', (2314, 2335), False, 'from distutils.version import LooseVersion\n'), ((4148, 4171), 'numpy.unique', 'np.unique', (['events[:, 2]'], {}), '(events[:, 2])\n', (4157, 4171), True, 'import numpy as np\n'), ((4965, 4981), 'os.path.isfile', 'op.isfile', (['fname'], {}), '(fname)\n', (4974, 4981), True, 'import os.path as op\n'), ((8794, 8818), 'numpy.allclose', 'np.allclose', (['locs3d', '(0.0)'], {}), '(locs3d, 0.0)\n', (8805, 8818), True, 'import numpy as np\n'), ((9128, 9143), 'numpy.array', 'np.array', (['picks'], {}), '(picks)\n', (9136, 9143), True, 'import numpy as np\n'), ((5278, 5319), 'mne.utils.logger.info', 'logger.info', (['"""Overwriting existing file."""'], {}), "('Overwriting existing file.')\n", (5289, 5319), False, 'from mne.utils import logger\n'), ((10787, 10804), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (10798, 10804), True, 'import numpy as np\n'), ((8748, 8767), 'numpy.isfinite', 'np.isfinite', (['locs3d'], {}), '(locs3d)\n', (8759, 8767), True, 'import numpy as np\n')]
|
# --------------
#Importing header files
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
# Code starts here
data = pd.read_csv(path)
X = data.drop(['customer.id','paid.back.loan'],axis =1 )
y = data['paid.back.loan']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3, random_state = 0 )
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
fully_paid = y_train.value_counts()
fully_paid.plot(kind = "Bar")
# Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['int.rate'] = X_train['int.rate'].str.replace("%","")
X_train['int.rate'] = X_train['int.rate'].apply(float)
X_train['int.rate'] = X_train['int.rate']/100
X_test['int.rate'] = X_test['int.rate'].str.replace("%","")
X_test['int.rate'] = X_test['int.rate'].apply(float)
X_test['int.rate'] = X_test['int.rate']/100
num_df= X_train.select_dtypes(exclude = ['object'])
cat_df = X_train.select_dtypes(include = ['object'])
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
cols = list(num_df.columns)
fig,axes = plt.subplots(nrows = 9 , ncols = 1)
for i in range (0,9):
sns.boxplot(x=y_train, y=num_df[cols[i]] , ax=axes[i])
# Code ends here
# --------------
# Code starts here
cols = list(cat_df.columns)
fig,axes = plt.subplots(nrows = 2 , ncols = 2)
for i in range (0,2):
for j in range(0,2):
sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j])
# Code ends here
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
# Code starts here
le = LabelEncoder()
X_train['credit.policy'] = le.fit_transform(X_train['credit.policy'])
X_train['purpose'] = le.fit_transform(X_train['purpose'])
X_train['inq.last.6mths'] = le.fit_transform(X_train['inq.last.6mths'])
X_train['delinq.2yrs'] = le.fit_transform(X_train['delinq.2yrs'])
X_test['credit.policy'] = le.fit_transform(X_test['credit.policy'])
X_test['purpose'] = le.fit_transform(X_test['purpose'])
X_test['inq.last.6mths'] = le.fit_transform(X_test['inq.last.6mths'])
X_test['delinq.2yrs'] = le.fit_transform(X_test['delinq.2yrs'])
y_train= y_train.str.replace("No","0")
y_train = y_train.str.replace("Yes","1")
y_test =y_test.str.replace("No","0")
y_test = y_test.str.replace("Yes","1")
model = tree.DecisionTreeClassifier(random_state=0)
model.fit(X_train,y_train)
acc = model.score(X_test,y_test)
print(acc)
# Code ends here
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = tree.DecisionTreeClassifier(random_state=0)
p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid,cv=5)
p_tree.fit(X_train,y_train)
acc_2= p_tree.score(X_test,y_test)
print(acc_2)
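# Editor's addition, illustrative and not part of the original exercise: once the
# grid search is fit, its winning configuration can be inspected directly.
print(p_tree.best_params_)    # e.g. {'max_depth': ..., 'min_samples_leaf': ...}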
# Code ends here
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True , class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here
|
[
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.show",
"warnings.filterwarnings",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.axis",
"sklearn.preprocessing.LabelEncoder",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.export_graphviz",
"pydotplus.graph_from_dot_data",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"numpy.arange",
"seaborn.countplot",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots"
] |
[((80, 113), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (103, 113), False, 'import warnings\n'), ((196, 213), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (207, 213), True, 'import pandas as pd\n'), ((330, 383), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (346, 383), False, 'from sklearn.model_selection import train_test_split\n'), ((1285, 1315), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(9)', 'ncols': '(1)'}), '(nrows=9, ncols=1)\n', (1297, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1529), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (1511, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1816, 1830), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1828, 1830), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2522, 2565), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2549, 2565), False, 'from sklearn import tree\n'), ((2878, 2921), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2905, 2921), False, 'from sklearn import tree\n'), ((2932, 2996), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model_2', 'param_grid': 'parameter_grid', 'cv': '(5)'}), '(estimator=model_2, param_grid=parameter_grid, cv=5)\n', (2944, 2996), False, 'from sklearn.model_selection import GridSearchCV\n'), ((3334, 3505), 'sklearn.tree.export_graphviz', 'export_graphviz', ([], {'decision_tree': 'p_tree.best_estimator_', 'out_file': 'None', 'feature_names': 'X.columns', 'filled': '(True)', 'class_names': "['loan_paid_back_yes', 'loan_paid_back_no']"}), "(decision_tree=p_tree.best_estimator_, out_file=None,\n feature_names=X.columns, filled=True, class_names=['loan_paid_back_yes',\n 'loan_paid_back_no'])\n", (3349, 3505), False, 'from sklearn.tree import export_graphviz\n'), ((3512, 3551), 'pydotplus.graph_from_dot_data', 'pydotplus.graph_from_dot_data', (['dot_data'], {}), '(dot_data)\n', (3541, 3551), False, 'import pydotplus\n'), ((3683, 3711), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (3693, 3711), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3759), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3752, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3768, 3770), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1402), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'y_train', 'y': 'num_df[cols[i]]', 'ax': 'axes[i]'}), '(x=y_train, y=num_df[cols[i]], ax=axes[i])\n', (1360, 1402), True, 'import seaborn as sns\n'), ((2794, 2810), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (2803, 2810), True, 'import numpy as np\n'), ((3722, 3742), 'matplotlib.pyplot.imread', 'plt.imread', (['img_path'], {}), '(img_path)\n', (3732, 3742), True, 'import matplotlib.pyplot as plt\n'), ((1592, 1661), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'X_train[cols[i * 2 + j]]', 'hue': 'y_train', 'ax': 'axes[i, j]'}), '(x=X_train[cols[i * 2 + j]], hue=y_train, ax=axes[i, j])\n', (1605, 1661), True, 'import seaborn as sns\n')]
|
import asyncio
import datetime
import logging
import random
import re
import sys
import traceback
import typing
import warnings
import weakref
from typing import Iterable, Union
import HABApp
import HABApp.core
import HABApp.openhab
import HABApp.rule_manager
import HABApp.util
from HABApp.core.events import AllEvents
from HABApp.core.items.base_item import BaseItem, TYPE_ITEM, TYPE_ITEM_CLS
from HABApp.rule import interfaces
from HABApp.rule.scheduler import HABAppSchedulerView as _HABAppSchedulerView
from .interfaces import async_subprocess_exec
log = logging.getLogger('HABApp.Rule')
# Func to log deprecation warnings
def send_warnings_to_log(message, category, filename, lineno, file=None, line=None):
log.warning('%s:%s: %s:%s' % (filename, lineno, category.__name__, message))
return
# Setup deprecation warnings
warnings.simplefilter('default')
warnings.showwarning = send_warnings_to_log
class Rule:
def __init__(self):
# get the variables from the caller
depth = 1
while True:
try:
__vars = sys._getframe(depth).f_globals
except ValueError:
raise RuntimeError('Rule files are not meant to be executed directly! '
'Put the file in the HABApp "rule" folder and HABApp will load it automatically.')
depth += 1
if '__HABAPP__RUNTIME__' in __vars:
__runtime__ = __vars['__HABAPP__RUNTIME__']
__rule_file__ = __vars['__HABAPP__RULE_FILE__']
break
        # variable for unittests
test = __vars.get('__UNITTEST__', False)
# this is a list which contains all rules of this file
__vars['__HABAPP__RULES'].append(self)
assert isinstance(__runtime__, HABApp.runtime.Runtime)
self.__runtime: HABApp.runtime.Runtime = __runtime__
if not test:
assert isinstance(__rule_file__, HABApp.rule_manager.RuleFile)
self.__rule_file: HABApp.rule_manager.RuleFile = __rule_file__
self.__event_listener: typing.List[HABApp.core.EventBusListener] = []
self.__unload_functions: typing.List[typing.Callable[[], None]] = []
self.__cancel_objs: weakref.WeakSet = weakref.WeakSet()
# schedule cleanup of this rule
self.register_on_unload(self.__cleanup_rule)
self.register_on_unload(self.__cleanup_objs)
# scheduler
self.run: _HABAppSchedulerView = _HABAppSchedulerView(self)
        # suggest a rule name if it is not already set
self.rule_name: str = self.__rule_file.suggest_rule_name(self)
# interfaces
self.async_http = interfaces.http
self.mqtt: HABApp.mqtt.interface = HABApp.mqtt.interface
self.oh: HABApp.openhab.interface = HABApp.openhab.interface
self.openhab: HABApp.openhab.interface = self.oh
@HABApp.core.wrapper.log_exception
def __cleanup_objs(self):
while self.__cancel_objs:
# we log each error as warning
with HABApp.core.wrapper.ExceptionToHABApp(log, logging.WARNING):
obj = self.__cancel_objs.pop()
obj.cancel()
@HABApp.core.wrapper.log_exception
def __cleanup_rule(self):
        # Important: set the listener container to None so we don't schedule a future event during _cleanup.
        # If it is None we will crash instead, but that is no problem because everything gets unloaded anyhow.
event_listeners = self.__event_listener
self.__event_listener = None
# Actually remove the listeners/events
for listener in event_listeners:
HABApp.core.EventBus.remove_listener(listener)
# Unload the scheduler
self.run._scheduler.cancel_all()
return None
def post_event(self, name, event):
"""
Post an event to the event bus
:param name: name or item to post event to
:param event: Event class to be used (must be class instance)
:return:
"""
assert isinstance(name, (str, HABApp.core.items.BaseValueItem)), type(name)
return HABApp.core.EventBus.post_event(
name.name if isinstance(name, HABApp.core.items.BaseValueItem) else name,
event
)
def listen_event(self, name: Union[HABApp.core.items.BaseValueItem, str],
callback: typing.Callable[[typing.Any], typing.Any],
event_type: Union[typing.Type['HABApp.core.events.AllEvents'],
'HABApp.core.events.EventFilter', typing.Any] = AllEvents
) -> HABApp.core.EventBusListener:
"""
Register an event listener
:param name: item or name to listen to. Use None to listen to all events
:param callback: callback that accepts one parameter which will contain the event
:param event_type: Event filter. This is typically :class:`~HABApp.core.events.ValueUpdateEvent` or
:class:`~HABApp.core.events.ValueChangeEvent` which will also trigger on changes/update from openhab
or mqtt. Additionally it can be an instance of :class:`~HABApp.core.events.EventFilter` which additionally
filters on the values of the event. There are also templates for the most common filters, e.g.
:class:`~HABApp.core.events.ValueUpdateEventFilter` and :class:`~HABApp.core.events.ValueChangeEventFilter`
"""
cb = HABApp.core.WrappedFunction(callback, name=self._get_cb_name(callback))
name = name.name if isinstance(name, HABApp.core.items.BaseValueItem) else name
if isinstance(event_type, HABApp.core.events.EventFilter):
listener = event_type.create_event_listener(name, cb)
else:
listener = HABApp.core.EventBusListener(name, cb, event_type)
self.__event_listener.append(listener)
HABApp.core.EventBus.add_listener(listener)
return listener
def execute_subprocess(self, callback, program, *args, capture_output=True):
"""Run another program
:param callback: |param_scheduled_cb| after process has finished. First parameter will
be an instance of :class:`~HABApp.rule.FinishedProcessInfo`
:param program: program or path to program to run
:param args: |param_scheduled_cb_args|
:param capture_output: Capture program output, set to `False` to only capture return code
:return:
"""
assert isinstance(program, str), type(program)
cb = HABApp.core.WrappedFunction(callback, name=self._get_cb_name(callback))
asyncio.run_coroutine_threadsafe(
async_subprocess_exec(cb.run, program, *args, capture_output=capture_output),
HABApp.core.const.loop
)
def get_rule(self, rule_name: str) -> 'Union[Rule, typing.List[Rule]]':
assert rule_name is None or isinstance(rule_name, str), type(rule_name)
return self.__runtime.rule_manager.get_rule(rule_name)
def register_on_unload(self, func: typing.Callable[[], typing.Any]):
"""Register a function with no parameters which will be called when the rule is unloaded.
Use this for custom cleanup functions.
:param func: function which will be called
"""
assert callable(func)
assert func not in self.__unload_functions, 'Function was already registered!'
self.__unload_functions.append(func)
def register_cancel_obj(self, obj):
"""Add a ``weakref`` to an obj which has a ``cancel`` function.
When the rule gets unloaded the cancel function will be called (if the obj was not already garbage collected)
:param obj:
"""
self.__cancel_objs.add(obj)
@staticmethod
def get_items(type: Union[typing.Tuple[TYPE_ITEM_CLS, ...], TYPE_ITEM_CLS] = None,
name: Union[str, typing.Pattern[str]] = None,
tags: Union[str, Iterable[str]] = None,
groups: Union[str, Iterable[str]] = None,
metadata: Union[str, typing.Pattern[str]] = None,
metadata_value: Union[str, typing.Pattern[str]] = None,
) -> Union[typing.List[TYPE_ITEM], typing.List[BaseItem]]:
"""Search the HABApp item registry and return the found items.
:param type: item has to be an instance of this class
:param name: str (will be compiled) or regex that is used to search the Name
:param tags: item must have these tags (will return only instances of OpenhabItem)
:param groups: item must be a member of these groups (will return only instances of OpenhabItem)
:param metadata: str (will be compiled) or regex that is used to search the metadata (e.g. 'homekit')
:param metadata_value: str (will be compiled) or regex that is used to search the metadata value
(e.g. 'TargetTemperature')
:return: Items that match all the passed criteria
"""
if name is not None and isinstance(name, str):
name = re.compile(name, re.IGNORECASE)
if metadata is not None and isinstance(metadata, str):
metadata = re.compile(metadata, re.IGNORECASE)
if metadata_value is not None and isinstance(metadata_value, str):
metadata_value = re.compile(metadata_value, re.IGNORECASE)
_tags, _groups = None, None
if tags is not None:
_tags = set(tags) if not isinstance(tags, str) else {tags}
if groups is not None:
_groups = set(groups) if not isinstance(groups, str) else {groups}
OpenhabItem = HABApp.openhab.items.OpenhabItem
if _tags or _groups or metadata or metadata_value:
if type is None:
type = OpenhabItem
if not issubclass(type, OpenhabItem):
raise ValueError('Searching for tags, groups and metadata only works for OpenhabItem or its Subclasses')
ret = []
for item in HABApp.core.Items.get_all_items(): # type: HABApp.core.items.base_valueitem.BaseItem
if type is not None and not isinstance(item, type):
continue
if name is not None and not name.search(item.name):
continue
if _tags is not None and not _tags.issubset(item.tags):
continue
if _groups is not None and not _groups.issubset(item.groups):
continue
if metadata is not None and not any(map(metadata.search, item.metadata)):
continue
if metadata_value is not None and not any(
map(metadata_value.search, map(lambda x: x[0], item.metadata.values()))):
continue
ret.append(item)
return ret
# -----------------------------------------------------------------------------------------------------------------
# deprecated functions
# -----------------------------------------------------------------------------------------------------------------
def run_every(self, time, interval: Union[int, datetime.timedelta], callback, *args, **kwargs):
warnings.warn('self.run_every is deprecated. Please use self.run.every', DeprecationWarning)
return self.run.every(time, interval, callback, *args, **kwargs)
def run_on_sun(self, sun_event: str, callback, *args, **kwargs):
warnings.warn('self.run_on_sun is deprecated. Please use self.run.on_sunrise, self.run.on_sunset, ...',
DeprecationWarning)
func = {'sunset': self.run.on_sunset, 'sunrise': self.run.on_sunrise,
'dusk': self.run.on_sun_dusk, 'dawn': self.run.on_sun_dawn}
return func[sun_event](callback, *args, **kwargs)
def run_on_day_of_week(self, time: datetime.time, weekdays, callback, *args, **kwargs):
warnings.warn('self.run_on_day_of_week is deprecated. Please use self.run.on_day_of_week', DeprecationWarning)
return self.run.on_day_of_week(time, weekdays, callback, *args, **kwargs)
def run_on_every_day(self, time: datetime.time, callback, *args, **kwargs):
warnings.warn('self.run_on_every_day is deprecated. Please use self.run.on_every_day', DeprecationWarning)
return self.run.on_every_day(time, callback, *args, **kwargs)
def run_on_workdays(self, time: datetime.time, callback, *args, **kwargs):
warnings.warn('self.run_on_workdays is deprecated. Please use self.run.on_workdays', DeprecationWarning)
return self.run.on_workdays(time, callback, *args, **kwargs)
def run_on_weekends(self, time: datetime.time, callback, *args, **kwargs):
warnings.warn('self.run_on_weekends is deprecated. Please use self.run.on_weekends', DeprecationWarning)
return self.run.on_weekends(time, callback, *args, **kwargs)
def run_daily(self, callback, *args, **kwargs):
        warnings.warn('self.run_daily is deprecated. Please use self.run.every', DeprecationWarning)
start = datetime.timedelta(seconds=random.randint(0, 24 * 3600 - 1))
return self.run.every(start, datetime.timedelta(days=1), callback, *args, **kwargs)
def run_hourly(self, callback, *args, **kwargs) :
warnings.warn('self.run_hourly is deprecated. Please use self.run.every_hour', DeprecationWarning)
return self.run.every_hour(callback, *args, **kwargs)
def run_minutely(self, callback, *args, **kwargs):
warnings.warn('self.run_minutely is deprecated. Please use self.run.every_minute', DeprecationWarning)
return self.run.every_minute(callback, *args, **kwargs)
def run_at(self, date_time, callback, *args, **kwargs):
warnings.warn('self.run_at is deprecated. Please use self.run.at', DeprecationWarning)
return self.run.at(date_time, callback, *args, **kwargs)
def run_in(self, seconds: Union[int, datetime.timedelta], callback, *args, **kwargs):
warnings.warn('self.run_in is deprecated. Please use self.run.at', DeprecationWarning)
return self.run.at(seconds, callback, *args, **kwargs)
def run_soon(self, callback, *args, **kwargs):
warnings.warn('self.run_soon is deprecated. Please use self.run.soon', DeprecationWarning)
return self.run.soon(callback, *args, **kwargs)
# -----------------------------------------------------------------------------------------------------------------
# internal functions
# -----------------------------------------------------------------------------------------------------------------
def _get_cb_name(self, callback):
return f'{self.rule_name}.{callback.__name__}' if self.rule_name else None
def _add_event_listener(self, listener: HABApp.core.EventBusListener) -> HABApp.core.EventBusListener:
self.__event_listener.append(listener)
HABApp.core.EventBus.add_listener(listener)
return listener
@HABApp.core.wrapper.log_exception
def _check_rule(self):
# We need items if we want to run the test
if HABApp.core.Items.get_all_items():
# Check if we have a valid item for all listeners
for listener in self.__event_listener:
# Internal topics - don't warn there
if listener.topic in HABApp.core.const.topics.ALL:
continue
# check if specific item exists
if not HABApp.core.Items.item_exists(listener.topic):
log.warning(f'Item "{listener.topic}" does not exist (yet)! '
f'self.listen_event in "{self.rule_name}" may not work as intended.')
# enable the scheduler
self.run._scheduler.resume()
@HABApp.core.wrapper.log_exception
def _unload(self):
# unload all functions
for func in self.__unload_functions:
try:
func()
except Exception as e:
# try getting function name
try:
name = f' in "{func.__name__}"'
except AttributeError:
name = ''
log.error(f'Error{name} while unloading "{self.rule_name}": {e}')
# log traceback
lines = traceback.format_exc().splitlines()
del lines[1:3] # see implementation in wrappedfunction.py why we do this
for line in lines:
log.error(line)
@HABApp.core.wrapper.log_exception
def get_parent_rule() -> Rule:
depth = 1
while True:
try:
frm = sys._getframe(depth)
except ValueError:
raise RuntimeError('Could not find parent rule!') from None
__vars = frm.f_locals
depth += 1
if 'self' in __vars:
rule = __vars['self']
if isinstance(rule, Rule):
return rule
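# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original module: what a typical rule file
# built on the class above looks like. It only works when HABApp's rule engine
# loads the file (the runtime injects __HABAPP__RUNTIME__ into the globals);
# the item name 'my_item' is an assumption for illustration, so the sketch is
# left commented out here.
#
# import HABApp
# from HABApp.core.events import ValueChangeEvent, ValueChangeEventFilter
#
# class MyRule(HABApp.Rule):
#     def __init__(self):
#         super().__init__()
#         self.listen_event('my_item', self.on_change, ValueChangeEventFilter())
#
#     def on_change(self, event: ValueChangeEvent):
#         self.run.soon(print, f'{event.name} changed to {event.value}')
#
# MyRule()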
|
[
"warnings.simplefilter",
"random.randint",
"HABApp.core.EventBus.remove_listener",
"sys._getframe",
"weakref.WeakSet",
"HABApp.core.Items.item_exists",
"HABApp.core.EventBus.add_listener",
"datetime.timedelta",
"traceback.format_exc",
"HABApp.core.Items.get_all_items",
"warnings.warn",
"HABApp.core.wrapper.ExceptionToHABApp",
"HABApp.core.EventBusListener",
"HABApp.rule.scheduler.HABAppSchedulerView",
"logging.getLogger",
"re.compile"
] |
[((562, 594), 'logging.getLogger', 'logging.getLogger', (['"""HABApp.Rule"""'], {}), "('HABApp.Rule')\n", (579, 594), False, 'import logging\n'), ((840, 872), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (861, 872), False, 'import warnings\n'), ((2255, 2272), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (2270, 2272), False, 'import weakref\n'), ((2482, 2508), 'HABApp.rule.scheduler.HABAppSchedulerView', '_HABAppSchedulerView', (['self'], {}), '(self)\n', (2502, 2508), True, 'from HABApp.rule.scheduler import HABAppSchedulerView as _HABAppSchedulerView\n'), ((5915, 5958), 'HABApp.core.EventBus.add_listener', 'HABApp.core.EventBus.add_listener', (['listener'], {}), '(listener)\n', (5948, 5958), False, 'import HABApp\n'), ((10068, 10101), 'HABApp.core.Items.get_all_items', 'HABApp.core.Items.get_all_items', ([], {}), '()\n', (10099, 10101), False, 'import HABApp\n'), ((11239, 11335), 'warnings.warn', 'warnings.warn', (['"""self.run_every is deprecated. Please use self.run.every"""', 'DeprecationWarning'], {}), "('self.run_every is deprecated. Please use self.run.every',\n DeprecationWarning)\n", (11252, 11335), False, 'import warnings\n'), ((11483, 11616), 'warnings.warn', 'warnings.warn', (['"""self.run_on_sun is deprecated. Please use self.run.on_sunrise, self.run.on_sunset, ..."""', 'DeprecationWarning'], {}), "(\n 'self.run_on_sun is deprecated. Please use self.run.on_sunrise, self.run.on_sunset, ...'\n , DeprecationWarning)\n", (11496, 11616), False, 'import warnings\n'), ((11942, 12062), 'warnings.warn', 'warnings.warn', (['"""self.run_on_day_of_week is deprecated. Please use self.run.on_day_of_week"""', 'DeprecationWarning'], {}), "(\n 'self.run_on_day_of_week is deprecated. Please use self.run.on_day_of_week'\n , DeprecationWarning)\n", (11955, 12062), False, 'import warnings\n'), ((12224, 12339), 'warnings.warn', 'warnings.warn', (['"""self.run_on_every_day is deprecated. Please use self.run.on_every_day"""', 'DeprecationWarning'], {}), "(\n 'self.run_on_every_day is deprecated. Please use self.run.on_every_day',\n DeprecationWarning)\n", (12237, 12339), False, 'import warnings\n'), ((12489, 12602), 'warnings.warn', 'warnings.warn', (['"""self.run_on_workdays is deprecated. Please use self.run.on_workdays"""', 'DeprecationWarning'], {}), "(\n 'self.run_on_workdays is deprecated. Please use self.run.on_workdays',\n DeprecationWarning)\n", (12502, 12602), False, 'import warnings\n'), ((12751, 12864), 'warnings.warn', 'warnings.warn', (['"""self.run_on_weekends is deprecated. Please use self.run.on_weekends"""', 'DeprecationWarning'], {}), "(\n 'self.run_on_weekends is deprecated. Please use self.run.on_weekends',\n DeprecationWarning)\n", (12764, 12864), False, 'import warnings\n'), ((12986, 13083), 'warnings.warn', 'warnings.warn', (['"""self.run_hourly is deprecated. Please use self.run.every"""', 'DeprecationWarning'], {}), "('self.run_hourly is deprecated. Please use self.run.every',\n DeprecationWarning)\n", (12999, 13083), False, 'import warnings\n'), ((13312, 13414), 'warnings.warn', 'warnings.warn', (['"""self.run_hourly is deprecated. Please use self.run.every_hour"""', 'DeprecationWarning'], {}), "('self.run_hourly is deprecated. Please use self.run.every_hour',\n DeprecationWarning)\n", (13325, 13414), False, 'import warnings\n'), ((13537, 13648), 'warnings.warn', 'warnings.warn', (['"""self.run_minutely is deprecated. 
Please use self.run.every_minute"""', 'DeprecationWarning'], {}), "(\n 'self.run_minutely is deprecated. Please use self.run.every_minute',\n DeprecationWarning)\n", (13550, 13648), False, 'import warnings\n'), ((13773, 13863), 'warnings.warn', 'warnings.warn', (['"""self.run_at is deprecated. Please use self.run.at"""', 'DeprecationWarning'], {}), "('self.run_at is deprecated. Please use self.run.at',\n DeprecationWarning)\n", (13786, 13863), False, 'import warnings\n'), ((14024, 14114), 'warnings.warn', 'warnings.warn', (['"""self.run_in is deprecated. Please use self.run.at"""', 'DeprecationWarning'], {}), "('self.run_in is deprecated. Please use self.run.at',\n DeprecationWarning)\n", (14037, 14114), False, 'import warnings\n'), ((14234, 14328), 'warnings.warn', 'warnings.warn', (['"""self.run_soon is deprecated. Please use self.run.soon"""', 'DeprecationWarning'], {}), "('self.run_soon is deprecated. Please use self.run.soon',\n DeprecationWarning)\n", (14247, 14328), False, 'import warnings\n'), ((14931, 14974), 'HABApp.core.EventBus.add_listener', 'HABApp.core.EventBus.add_listener', (['listener'], {}), '(listener)\n', (14964, 14974), False, 'import HABApp\n'), ((15128, 15161), 'HABApp.core.Items.get_all_items', 'HABApp.core.Items.get_all_items', ([], {}), '()\n', (15159, 15161), False, 'import HABApp\n'), ((3648, 3694), 'HABApp.core.EventBus.remove_listener', 'HABApp.core.EventBus.remove_listener', (['listener'], {}), '(listener)\n', (3684, 3694), False, 'import HABApp\n'), ((5808, 5858), 'HABApp.core.EventBusListener', 'HABApp.core.EventBusListener', (['name', 'cb', 'event_type'], {}), '(name, cb, event_type)\n', (5836, 5858), False, 'import HABApp\n'), ((9133, 9164), 're.compile', 're.compile', (['name', 're.IGNORECASE'], {}), '(name, re.IGNORECASE)\n', (9143, 9164), False, 'import re\n'), ((9251, 9286), 're.compile', 're.compile', (['metadata', 're.IGNORECASE'], {}), '(metadata, re.IGNORECASE)\n', (9261, 9286), False, 'import re\n'), ((9391, 9432), 're.compile', 're.compile', (['metadata_value', 're.IGNORECASE'], {}), '(metadata_value, re.IGNORECASE)\n', (9401, 9432), False, 'import re\n'), ((13194, 13220), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (13212, 13220), False, 'import datetime\n'), ((16668, 16688), 'sys._getframe', 'sys._getframe', (['depth'], {}), '(depth)\n', (16681, 16688), False, 'import sys\n'), ((3043, 3102), 'HABApp.core.wrapper.ExceptionToHABApp', 'HABApp.core.wrapper.ExceptionToHABApp', (['log', 'logging.WARNING'], {}), '(log, logging.WARNING)\n', (3080, 3102), False, 'import HABApp\n'), ((13123, 13155), 'random.randint', 'random.randint', (['(0)', '(24 * 3600 - 1)'], {}), '(0, 24 * 3600 - 1)\n', (13137, 13155), False, 'import random\n'), ((1080, 1100), 'sys._getframe', 'sys._getframe', (['depth'], {}), '(depth)\n', (1093, 1100), False, 'import sys\n'), ((15499, 15544), 'HABApp.core.Items.item_exists', 'HABApp.core.Items.item_exists', (['listener.topic'], {}), '(listener.topic)\n', (15528, 15544), False, 'import HABApp\n'), ((16342, 16364), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16362, 16364), False, 'import traceback\n')]
|
# Some tests for the Cylindrical coordinates handler
import numpy as np
from yt.testing import \
fake_amr_ds, \
assert_equal, \
assert_almost_equal
# Our canonical tests are that we can access all of our fields and we can
# compute our volume correctly.
def test_cylindrical_coordinates():
# We're going to load up a simple AMR grid and check its volume
# calculations and path length calculations.
ds = fake_amr_ds(geometry="cylindrical")
axes = ["r", "z", "theta"]
for i, axis in enumerate(axes):
dd = ds.all_data()
fi = ("index", axis)
fd = ("index", "d%s" % axis)
ma = np.argmax(dd[fi])
assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)
mi = np.argmin(dd[fi])
assert_equal(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)
assert_equal(dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d)
assert_almost_equal(dd["cell_volume"].sum(dtype="float64"),
np.pi*ds.domain_width[0]**2 * ds.domain_width[1])
assert_equal(dd["index", "path_element_r"], dd["index", "dr"])
assert_equal(dd["index", "path_element_z"], dd["index", "dz"])
assert_equal(dd["index", "path_element_theta"],
dd["index", "r"] * dd["index", "dtheta"])
|
[
"yt.testing.fake_amr_ds",
"yt.testing.assert_equal",
"numpy.argmin",
"numpy.argmax"
] |
[((432, 467), 'yt.testing.fake_amr_ds', 'fake_amr_ds', ([], {'geometry': '"""cylindrical"""'}), "(geometry='cylindrical')\n", (443, 467), False, 'from yt.testing import fake_amr_ds, assert_equal, assert_almost_equal\n'), ((1069, 1131), 'yt.testing.assert_equal', 'assert_equal', (["dd['index', 'path_element_r']", "dd['index', 'dr']"], {}), "(dd['index', 'path_element_r'], dd['index', 'dr'])\n", (1081, 1131), False, 'from yt.testing import fake_amr_ds, assert_equal, assert_almost_equal\n'), ((1136, 1198), 'yt.testing.assert_equal', 'assert_equal', (["dd['index', 'path_element_z']", "dd['index', 'dz']"], {}), "(dd['index', 'path_element_z'], dd['index', 'dz'])\n", (1148, 1198), False, 'from yt.testing import fake_amr_ds, assert_equal, assert_almost_equal\n'), ((1203, 1297), 'yt.testing.assert_equal', 'assert_equal', (["dd['index', 'path_element_theta']", "(dd['index', 'r'] * dd['index', 'dtheta'])"], {}), "(dd['index', 'path_element_theta'], dd['index', 'r'] * dd[\n 'index', 'dtheta'])\n", (1215, 1297), False, 'from yt.testing import fake_amr_ds, assert_equal, assert_almost_equal\n'), ((641, 658), 'numpy.argmax', 'np.argmax', (['dd[fi]'], {}), '(dd[fi])\n', (650, 658), True, 'import numpy as np\n'), ((667, 737), 'yt.testing.assert_equal', 'assert_equal', (['(dd[fi][ma] + dd[fd][ma] / 2.0)', 'ds.domain_right_edge[i].d'], {}), '(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d)\n', (679, 737), False, 'from yt.testing import fake_amr_ds, assert_equal, assert_almost_equal\n'), ((751, 768), 'numpy.argmin', 'np.argmin', (['dd[fi]'], {}), '(dd[fi])\n', (760, 768), True, 'import numpy as np\n'), ((777, 846), 'yt.testing.assert_equal', 'assert_equal', (['(dd[fi][mi] - dd[fd][mi] / 2.0)', 'ds.domain_left_edge[i].d'], {}), '(dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d)\n', (789, 846), False, 'from yt.testing import fake_amr_ds, assert_equal, assert_almost_equal\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
import os
import argparse
import pyuvdata
from hera_cal.version import version_info
import pyuvdata.utils as uvutils
import numpy as np
parser = argparse.ArgumentParser(description='Extract HERA hex antennas from data '
'file, and save with new extension.')
parser.add_argument('--extension', type=str, help='Extension to be appended to '
'filename for output. Default="HH".', default='HH')
parser.add_argument('--filetype', type=str, help='Input and output file type. '
'Allowed values are "miriad" (default), and "uvfits".',
default='miriad')
parser.add_argument('--fixuvws', action='store_true', help='Optional flag to '
'use antenna positions to replace uvws.')
parser.add_argument('--overwrite', action='store_true', default=False,
help='Optional flag to overwrite output file if it exists.')
parser.add_argument('files', metavar='files', type=str, nargs='+',
help='Files to be processed.')
parser.add_argument('--ex_ants_file', type=str, help='Text file with list of antennas'
' which are excluded downstream in RTP. Generally, these are '
'antennas which are being actively commissioned, or known as bad.'
' Note these values are only stored in the history, not actually '
'flagged at this step.',
default=None)
args = parser.parse_args()
for filename in args.files:
uv = pyuvdata.UVData()
if args.filetype == 'miriad':
uv.read_miriad(filename)
elif args.filetype == 'uvfits':
uv.read_uvfits(filename)
else:
raise ValueError('Unrecognized file type ' + str(args.filetype))
st_type_str = uv.extra_keywords.pop('st_type').replace('\x00', '')
st_type_list = st_type_str[1:-1].split(', ')
ind = [i for i, x in enumerate(st_type_list) if x[:7] == 'herahex' or x == 'heraringa' or x == 'heraringb']
uv.select(antenna_nums=uv.antenna_numbers[ind])
st_type_list = list(np.array(st_type_list)[np.array(ind, dtype=int)])
uv.extra_keywords['st_type'] = '[' + ', '.join(st_type_list) + ']'
uv.history += ' Hera Hex antennas selected'
if args.fixuvws:
antpos = uv.antenna_positions + uv.telescope_location
antpos = uvutils.ENU_from_ECEF(antpos.T, *uv.telescope_location_lat_lon_alt).T
antmap = -np.ones(np.max(uv.antenna_numbers) + 1, dtype=int)
for i, ant in enumerate(uv.antenna_numbers):
antmap[ant] = i
uv.uvw_array = antpos[antmap[uv.ant_2_array], :] - antpos[antmap[uv.ant_1_array], :]
uv.history += ' and uvws corrected'
uv.history += ' with hera_cal/scripts/extract_hh.py, hera_cal version: ' +\
str(version_info) + '.'
if args.ex_ants_file:
ex_ants = np.loadtxt(args.ex_ants_file, dtype=int)
ex_ants = [str(ant) for ant in ex_ants if ant in uv.get_ants()]
uv.history += ' Antennas to exclude in RTP: ' + ','.join(ex_ants) + '.'
if args.filetype == 'miriad':
base, ext = os.path.splitext(filename)
uv.write_miriad(base + '.' + args.extension + ext, clobber=args.overwrite)
else:
base, ext = os.path.splitext(filename)
uv.write_uvfits(base + args.extension + ext, clobber=args.overwrite)
del(uv) # Reset for next loop
|
[
"pyuvdata.UVData",
"argparse.ArgumentParser",
"pyuvdata.utils.ENU_from_ECEF",
"numpy.max",
"numpy.array",
"os.path.splitext",
"numpy.loadtxt"
] |
[((261, 375), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract HERA hex antennas from data file, and save with new extension."""'}), "(description=\n 'Extract HERA hex antennas from data file, and save with new extension.')\n", (284, 375), False, 'import argparse\n'), ((1653, 1670), 'pyuvdata.UVData', 'pyuvdata.UVData', ([], {}), '()\n', (1668, 1670), False, 'import pyuvdata\n'), ((2990, 3030), 'numpy.loadtxt', 'np.loadtxt', (['args.ex_ants_file'], {'dtype': 'int'}), '(args.ex_ants_file, dtype=int)\n', (3000, 3030), True, 'import numpy as np\n'), ((3237, 3263), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3253, 3263), False, 'import os\n'), ((3377, 3403), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3393, 3403), False, 'import os\n'), ((2198, 2220), 'numpy.array', 'np.array', (['st_type_list'], {}), '(st_type_list)\n', (2206, 2220), True, 'import numpy as np\n'), ((2221, 2245), 'numpy.array', 'np.array', (['ind'], {'dtype': 'int'}), '(ind, dtype=int)\n', (2229, 2245), True, 'import numpy as np\n'), ((2467, 2534), 'pyuvdata.utils.ENU_from_ECEF', 'uvutils.ENU_from_ECEF', (['antpos.T', '*uv.telescope_location_lat_lon_alt'], {}), '(antpos.T, *uv.telescope_location_lat_lon_alt)\n', (2488, 2534), True, 'import pyuvdata.utils as uvutils\n'), ((2563, 2589), 'numpy.max', 'np.max', (['uv.antenna_numbers'], {}), '(uv.antenna_numbers)\n', (2569, 2589), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
import os
import shutil
if sys.version_info[0] < 3:
print("Python 3 is required to run the script and not Python "+str(sys.version_info[0]))
sys.exit(1)
def fatal_error(message):
sys.stderr.write(message+"\n")
sys.stderr.write("\n\nBUILD FAILED!!! LOOK AT MESSAGES ABOVE\n\n")
sys.exit(1)
def run_command(command):
if os.system(command):
fatal_error("Failed to execute '"+command+"'")
def check_cd(path):
if os.chdir(path):
fatal_error("Failed to cd into '"+path+"'")
project_home = os.path.dirname(os.path.realpath(__file__))
if os.path.isdir("build_release"):
shutil.rmtree("build_release")
os.makedirs("build_release")
check_cd("build_release")
run_command("cmake -DCMAKE_BUILD_TYPE=Release ..")
run_command("make -j8")
check_cd(os.path.join(project_home,"jni"))
run_command("./gradlew -Pnative_build_location=../build_release install")
# Have a command line argument for sending it to maven central
#run_command("./gradlew -Pnative_build_location=../build_release uploadArchives")
|
[
"os.makedirs",
"os.path.isdir",
"os.path.realpath",
"os.system",
"shutil.rmtree",
"sys.stderr.write",
"os.path.join",
"os.chdir",
"sys.exit"
] |
[((615, 645), 'os.path.isdir', 'os.path.isdir', (['"""build_release"""'], {}), "('build_release')\n", (628, 645), False, 'import os\n'), ((682, 710), 'os.makedirs', 'os.makedirs', (['"""build_release"""'], {}), "('build_release')\n", (693, 710), False, 'import os\n'), ((185, 196), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (193, 196), False, 'import sys\n'), ((228, 260), 'sys.stderr.write', 'sys.stderr.write', (["(message + '\\n')"], {}), "(message + '\\n')\n", (244, 260), False, 'import sys\n'), ((263, 329), 'sys.stderr.write', 'sys.stderr.write', (['"""\n\nBUILD FAILED!!! LOOK AT MESSAGES ABOVE\n\n"""'], {}), '("""\n\nBUILD FAILED!!! LOOK AT MESSAGES ABOVE\n\n""")\n', (279, 329), False, 'import sys\n'), ((334, 345), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (342, 345), False, 'import sys\n'), ((380, 398), 'os.system', 'os.system', (['command'], {}), '(command)\n', (389, 398), False, 'import os\n'), ((483, 497), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (491, 497), False, 'import os\n'), ((583, 609), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (599, 609), False, 'import os\n'), ((651, 681), 'shutil.rmtree', 'shutil.rmtree', (['"""build_release"""'], {}), "('build_release')\n", (664, 681), False, 'import shutil\n'), ((821, 854), 'os.path.join', 'os.path.join', (['project_home', '"""jni"""'], {}), "(project_home, 'jni')\n", (833, 854), False, 'import os\n')]
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Demo ward clustering on a graph: various ways of forming clusters and dendrogram
Requires matplotlib
"""
print(__doc__)
import numpy as np
from numpy.random import randn, rand
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.algorithms.graph import knn
from nipy.algorithms.clustering.hierarchical_clustering import ward
# n = number of points, k = number of nearest neighbours
n = 100
k = 5
# Set verbose to True to see more printed output
verbose = False
X = randn(n, 2)
X[:int(np.ceil(n / 3))] += 3  # slice indices must be integers
G = knn(X, 5)
tree = ward(G, X, verbose)
threshold = .5 * n
u = tree.partition(threshold)
plt.figure(figsize=(12, 6))
plt.subplot(1, 3, 1)
for i in range(u.max()+1):
plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand()))
plt.axis('tight')
plt.axis('off')
plt.title('clustering into clusters \n of inertia < %g' % threshold)
u = tree.split(k)
plt.subplot(1, 3, 2)
for e in range(G.E):
plt.plot([X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]],
[X[G.edges[e, 0], 1], X[G.edges[e, 1], 1]], 'k')
for i in range(u.max() + 1):
plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand()))
plt.axis('tight')
plt.axis('off')
plt.title('clustering into 5 clusters')
nl = np.sum(tree.isleaf())
validleaves = np.zeros(n)
validleaves[:int(np.ceil(n / 4))] = 1  # cast to int: slice indices must be integers
valid = np.zeros(tree.V, 'bool')
valid[tree.isleaf()] = validleaves.astype('bool')
nv = np.sum(validleaves)
nv0 = 0
while nv > nv0:
nv0 = nv
for v in range(tree.V):
if valid[v]:
            valid[tree.parents[v]] = 1
nv = np.sum(valid)
ax = plt.subplot(1, 3, 3)
ax = tree.plot(ax)
ax.set_title('Dendrogram')
ax.set_visible(True)
plt.show()
if verbose:
print('List of sub trees')
print(tree.list_of_subtrees())
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.ceil",
"nipy.algorithms.clustering.hierarchical_clustering.ward",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"nipy.algorithms.graph.knn",
"numpy.random.rand"
] |
[((772, 783), 'numpy.random.randn', 'randn', (['n', '(2)'], {}), '(n, 2)\n', (777, 783), False, 'from numpy.random import randn, rand\n'), ((812, 821), 'nipy.algorithms.graph.knn', 'knn', (['X', '(5)'], {}), '(X, 5)\n', (815, 821), False, 'from nipy.algorithms.graph import knn\n'), ((829, 848), 'nipy.algorithms.clustering.hierarchical_clustering.ward', 'ward', (['G', 'X', 'verbose'], {}), '(G, X, verbose)\n', (833, 848), False, 'from nipy.algorithms.clustering.hierarchical_clustering import ward\n'), ((900, 927), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (910, 927), True, 'import matplotlib.pyplot as plt\n'), ((928, 948), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (939, 948), True, 'import matplotlib.pyplot as plt\n'), ((1055, 1072), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (1063, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1088), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1081, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1160), 'matplotlib.pyplot.title', 'plt.title', (['("""clustering into clusters \n of inertia < %g""" % threshold)'], {}), '("""clustering into clusters \n of inertia < %g""" % threshold)\n', (1098, 1160), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1197), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1188, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1461), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (1452, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1477), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1470, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1517), 'matplotlib.pyplot.title', 'plt.title', (['"""clustering into 5 clusters"""'], {}), "('clustering into 5 clusters')\n", (1487, 1517), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1571), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1568, 1571), True, 'import numpy as np\n'), ((1613, 1637), 'numpy.zeros', 'np.zeros', (['tree.V', '"""bool"""'], {}), "(tree.V, 'bool')\n", (1621, 1637), True, 'import numpy as np\n'), ((1693, 1712), 'numpy.sum', 'np.sum', (['validleaves'], {}), '(validleaves)\n', (1699, 1712), True, 'import numpy as np\n'), ((1869, 1889), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1880, 1889), True, 'import matplotlib.pyplot as plt\n'), ((1957, 1967), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1965, 1967), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1328), 'matplotlib.pyplot.plot', 'plt.plot', (['[X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]]', '[X[G.edges[e, 0], 1], X[G.edges[e, 1], 1]]', '"""k"""'], {}), "([X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]], [X[G.edges[e, 0], 1],\n X[G.edges[e, 1], 1]], 'k')\n", (1231, 1328), True, 'import matplotlib.pyplot as plt\n'), ((787, 801), 'numpy.ceil', 'np.ceil', (['(n / 3)'], {}), '(n / 3)\n', (794, 801), True, 'import numpy as np\n'), ((1585, 1599), 'numpy.ceil', 'np.ceil', (['(n / 4)'], {}), '(n / 4)\n', (1592, 1599), True, 'import numpy as np\n'), ((1849, 1862), 'numpy.sum', 'np.sum', (['valid'], {}), '(valid)\n', (1855, 1862), True, 'import numpy as np\n'), ((1029, 1035), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1033, 1035), False, 'from numpy.random import randn, rand\n'), ((1037, 1043), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1041, 
1043), False, 'from numpy.random import randn, rand\n'), ((1045, 1051), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1049, 1051), False, 'from numpy.random import randn, rand\n'), ((1419, 1425), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1423, 1425), False, 'from numpy.random import randn, rand\n'), ((1427, 1433), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1431, 1433), False, 'from numpy.random import randn, rand\n'), ((1435, 1441), 'numpy.random.rand', 'rand', ([], {}), '()\n', (1439, 1441), False, 'from numpy.random import randn, rand\n')]
|
import os
import sys
import yaml
import logging
from typing import Optional, Union, TypeVar
T = TypeVar("T")
def _parse_with_warning(var_name: str, default_value: T) -> Union[T, str]:
if var_name in os.environ:
return os.environ[var_name]
else:
logging.warning(f"{var_name} not found in environment variable, default value={default_value} will be used.")
return default_value
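# Hedged usage sketch (not part of the original module): values read from os.environ are
# always strings, while the fallback keeps the default's type. The variable names below
# are illustrative only.
#   os.environ["CONN_TIMEOUT"] = "30"
#   _parse_with_warning("CONN_TIMEOUT", 30)   # -> "30" (string, taken from the environment)
#   _parse_with_warning("CONN_RETRIES", 3)    # -> 3, after logging a warning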
# load settings from yaml
SETTINGS: dict = {}
try:
with open("settings.yml") as f:
SETTINGS = yaml.safe_load(f)
except (yaml.YAMLError, OSError) as e:
logging.warning(e)
logging.warning("Default settings will be applied.")
# SCVMM connection login
CONN_LOGIN: Optional[str] = _parse_with_warning("CONN_LOGIN", None)
# SCVMM connection password
CONN_PASSWORD: Optional[str] = _parse_with_warning("CONN_PASSWORD", None)
# SCVMM connection host
if "CONN_HOST" not in os.environ:
logging.error("CONN_HOST not specified, exiting...")
sys.exit(1)
CONN_HOST: Optional[str] = os.environ["CONN_HOST"]
|
[
"logging.error",
"logging.warning",
"yaml.safe_load",
"typing.TypeVar",
"sys.exit"
] |
[((97, 109), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (104, 109), False, 'from typing import Optional, Union, TypeVar\n'), ((916, 968), 'logging.error', 'logging.error', (['"""CONN_HOST not specified, exiting..."""'], {}), "('CONN_HOST not specified, exiting...')\n", (929, 968), False, 'import logging\n'), ((973, 984), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (981, 984), False, 'import sys\n'), ((272, 391), 'logging.warning', 'logging.warning', (['f"""{var_name} not found in environment variable, default value={default_value} will be used."""'], {}), "(\n f'{var_name} not found in environment variable, default value={default_value} will be used.'\n )\n", (287, 391), False, 'import logging\n'), ((519, 536), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (533, 536), False, 'import yaml\n'), ((580, 598), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (595, 598), False, 'import logging\n'), ((603, 655), 'logging.warning', 'logging.warning', (['"""Default settings will be applied."""'], {}), "('Default settings will be applied.')\n", (618, 655), False, 'import logging\n')]
|
# MIT License
#
# Copyright (c) 2018 <NAME>, <EMAIL>, <NAME> <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from eosetl.mappers.action_mapper import EosActionMapper
from eosetl.mappers.block_mapper import EosBlockMapper
from eosetl.mappers.transaction_mapper import EosTransactionMapper
class EosService(object):
def __init__(self, eos_rpc):
self.eos_rpc = eos_rpc
self.block_mapper = EosBlockMapper()
self.transaction_mapper = EosTransactionMapper()
self.action_mapper = EosActionMapper()
def get_block(self, block_number):
return self.eos_rpc.getblock(block_number)
def get_genesis_block(self):
return self.get_block(1)
def get_latest_block(self):
last_irreversible_block_id = self.eos_rpc.get_info()["last_irreversible_block_id"]
return self.get_block(last_irreversible_block_id)
def get_blocks(self, block_number_batch):
if not block_number_batch:
return []
return [self.get_block(x) for x in block_number_batch]
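# Hedged usage sketch (not part of the original module): EosService only wraps the RPC
# client it is given, so a caller might look roughly like the following, where eos_rpc
# is assumed to expose getblock() and get_info() as used above.
#   service = EosService(eos_rpc)
#   latest = service.get_latest_block()
#   first_ten = service.get_blocks(range(1, 11))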
|
[
"eosetl.mappers.action_mapper.EosActionMapper",
"eosetl.mappers.transaction_mapper.EosTransactionMapper",
"eosetl.mappers.block_mapper.EosBlockMapper"
] |
[((1426, 1442), 'eosetl.mappers.block_mapper.EosBlockMapper', 'EosBlockMapper', ([], {}), '()\n', (1440, 1442), False, 'from eosetl.mappers.block_mapper import EosBlockMapper\n'), ((1477, 1499), 'eosetl.mappers.transaction_mapper.EosTransactionMapper', 'EosTransactionMapper', ([], {}), '()\n', (1497, 1499), False, 'from eosetl.mappers.transaction_mapper import EosTransactionMapper\n'), ((1529, 1546), 'eosetl.mappers.action_mapper.EosActionMapper', 'EosActionMapper', ([], {}), '()\n', (1544, 1546), False, 'from eosetl.mappers.action_mapper import EosActionMapper\n')]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tests for the ConsolidateBlocks transpiler pass.
"""
import unittest
import numpy as np
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.circuit.library import U2Gate
from qiskit.extensions import UnitaryGate
from qiskit.converters import circuit_to_dag
from qiskit.transpiler.passes import ConsolidateBlocks
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info.operators.measures import process_fidelity
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Collect2qBlocks
class TestConsolidateBlocks(QiskitTestCase):
"""
Tests to verify that consolidating blocks of gates into unitaries
works correctly.
"""
def test_consolidate_small_block(self):
"""test a small block of gates can be turned into a unitary on same wires"""
qr = QuantumRegister(2, "qr")
qc = QuantumCircuit(qr)
qc.p(0.5, qr[0])
qc.u(1.5708, 0.2, 0.6, qr[1])
qc.cx(qr[0], qr[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
unitary = Operator(qc)
self.assertEqual(len(new_dag.op_nodes()), 1)
fidelity = process_fidelity(Operator(new_dag.op_nodes()[0].op), unitary)
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_wire_order(self):
"""order of qubits and the corresponding unitary is correct"""
qr = QuantumRegister(2, "qr")
qc = QuantumCircuit(qr)
qc.cx(qr[1], qr[0])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [dag.op_nodes()]
new_dag = pass_.run(dag)
new_node = new_dag.op_nodes()[0]
self.assertEqual(new_node.qargs, [qr[0], qr[1]])
unitary = Operator(qc)
fidelity = process_fidelity(Operator(new_node.op), unitary)
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_topological_order_preserved(self):
"""the original topological order of nodes is preserved
______
q0:--[p]-------.---- q0:-------------| |--
| ______ | U2 |
q1:--[u2]--(+)-(+)-- = q1:---| |--|______|--
| | U1 |
q2:---------.------- q2:---|______|------------
"""
qr = QuantumRegister(3, "qr")
qc = QuantumCircuit(qr)
qc.p(0.5, qr[0])
qc.u(1.5708, 0.2, 0.6, qr[1])
qc.cx(qr[2], qr[1])
qc.cx(qr[0], qr[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
topo_ops = list(dag.topological_op_nodes())
block_1 = [topo_ops[1], topo_ops[2]]
block_2 = [topo_ops[0], topo_ops[3]]
pass_.property_set['block_list'] = [block_1, block_2]
new_dag = pass_.run(dag)
new_topo_ops = [i for i in new_dag.topological_op_nodes() if i.type == 'op']
self.assertEqual(len(new_topo_ops), 2)
self.assertEqual(new_topo_ops[0].qargs, [qr[1], qr[2]])
self.assertEqual(new_topo_ops[1].qargs, [qr[0], qr[1]])
def test_3q_blocks(self):
"""blocks of more than 2 qubits work."""
qr = QuantumRegister(3, "qr")
qc = QuantumCircuit(qr)
qc.p(0.5, qr[0])
qc.u(1.5708, 0.2, 0.6, qr[1])
qc.cx(qr[2], qr[1])
qc.cx(qr[0], qr[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
unitary = Operator(qc)
self.assertEqual(len(new_dag.op_nodes()), 1)
fidelity = process_fidelity(Operator(new_dag.op_nodes()[0].op), unitary)
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_block_spanning_two_regs(self):
"""blocks spanning wires on different quantum registers work."""
qr0 = QuantumRegister(1, "qr0")
qr1 = QuantumRegister(1, "qr1")
qc = QuantumCircuit(qr0, qr1)
qc.p(0.5, qr0[0])
qc.u(1.5708, 0.2, 0.6, qr1[0])
qc.cx(qr0[0], qr1[0])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
unitary = Operator(qc)
self.assertEqual(len(new_dag.op_nodes()), 1)
fidelity = process_fidelity(Operator(new_dag.op_nodes()[0].op), unitary)
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_block_spanning_two_regs_different_index(self):
"""blocks spanning wires on different quantum registers work when the wires
could have conflicting indices. This was raised in #2806 when a CX was applied
across multiple registers and their indices collided, raising an error."""
qr0 = QuantumRegister(1, "qr0")
qr1 = QuantumRegister(2, "qr1")
qc = QuantumCircuit(qr0, qr1)
qc.cx(qr0[0], qr1[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
original_unitary = UnitaryGate(Operator(qc))
from qiskit.converters import dag_to_circuit
new_unitary = UnitaryGate(Operator(dag_to_circuit(new_dag)))
self.assertEqual(original_unitary, new_unitary)
def test_node_added_before_block(self):
"""Test that a node before a block remains before the block
This issue was raised in #2737 where the measure was moved
to be after the 2nd ID gate, as the block was added when the
first node in the block was seen.
blocks = [['id', 'cx', 'id']]
┌────┐┌───┐
q_0: |0>┤ Id ├┤ X ├──────
└┬─┬─┘└─┬─┘┌────┐
q_1: |0>─┤M├────■──┤ Id ├
└╥┘ └────┘
c_0: 0 ══╩══════════════
"""
qc = QuantumCircuit(2, 1)
qc.i(0)
qc.measure(1, 0)
qc.cx(1, 0)
qc.i(1)
# can't just add all the nodes to one block as in other tests
# as we are trying to test the block gets added in the correct place
# so use a pass to collect the blocks instead
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = pass_manager.run(qc)
self.assertEqual(qc, qc1)
def test_consolidate_blocks_big(self):
"""Test ConsolidateBlocks with U2(<big numbers>)
https://github.com/Qiskit/qiskit-terra/issues/3637#issuecomment-612954865
┌────────────────┐ ┌───┐
q_0: ┤ U2(-804.15,pi) ├──■──┤ X ├
├────────────────┤┌─┴─┐└─┬─┘
q_1: ┤ U2(-6433.2,pi) ├┤ X ├──■──
└────────────────┘└───┘
"""
circuit = QuantumCircuit(2)
circuit.append(U2Gate(-804.15, np.pi), [0])
circuit.append(U2Gate(-6433.2, np.pi), [1])
circuit.cx(0, 1)
circuit.cx(1, 0)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
result = pass_manager.run(circuit)
self.assertEqual(circuit, result)
def test_node_added_after_block(self):
"""Test that a node after the block remains after the block
This example was raised in #2764, and checks that the final CX
stays after the main block, even though one of the nodes in the
block was declared after it. This occurred when the block was
added when the last node in the block was seen.
blocks = [['cx', 'id', 'id']]
q_0: |0>─────────────■──
┌────┐┌─┴─┐
q_1: |0>──■──┤ Id ├┤ X ├
┌─┴─┐├────┤└───┘
q_2: |0>┤ X ├┤ Id ├─────
└───┘└────┘
"""
qc = QuantumCircuit(3)
qc.cx(1, 2)
qc.i(1)
qc.cx(0, 1)
qc.i(2)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = pass_manager.run(qc)
self.assertEqual(qc, qc1)
def test_node_middle_of_blocks(self):
"""Test that a node surrounded by blocks stays in the same place
This is a larger test to ensure multiple blocks can all be collected
and added back in the correct order.
blocks = [['cx', 'id'], ['cx', 'id'], ['id', 'cx'], ['id', 'cx']]
q_0: |0>──■───────────────────■──
┌─┴─┐┌────┐ ┌────┐┌─┴─┐
q_1: |0>┤ X ├┤ Id ├─X─┤ Id ├┤ X ├
├───┤├────┤ │ ├────┤├───┤
q_2: |0>┤ X ├┤ Id ├─X─┤ Id ├┤ X ├
└─┬─┘└────┘ └────┘└─┬─┘
q_3: |0>──■───────────────────■──
"""
qc = QuantumCircuit(4)
qc.cx(0, 1)
qc.cx(3, 2)
qc.i(1)
qc.i(2)
qc.swap(1, 2)
qc.i(1)
qc.i(2)
qc.cx(0, 1)
qc.cx(3, 2)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = pass_manager.run(qc)
self.assertEqual(qc, qc1)
def test_classical_conditions_maintained(self):
"""Test that consolidate blocks doesn't drop the classical conditions
This issue was raised in #2752
"""
qc = QuantumCircuit(1, 1)
qc.h(0).c_if(qc.cregs[0], 1)
qc.measure(0, 0)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = pass_manager.run(qc)
self.assertEqual(qc, qc1)
def test_no_kak_in_basis(self):
"""Test that pass just returns the input dag without a KAK gate."""
qc = QuantumCircuit(1)
qc.h(0)
dag = circuit_to_dag(qc)
consolidate_blocks_pass = ConsolidateBlocks(basis_gates=['u3'])
res = consolidate_blocks_pass.run(dag)
self.assertEqual(res, dag)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"qiskit.transpiler.passes.Collect2qBlocks",
"qiskit.transpiler.passes.ConsolidateBlocks",
"qiskit.circuit.library.U2Gate",
"qiskit.converters.dag_to_circuit",
"qiskit.circuit.QuantumRegister",
"qiskit.circuit.QuantumCircuit",
"qiskit.quantum_info.operators.Operator",
"qiskit.converters.circuit_to_dag",
"qiskit.transpiler.PassManager"
] |
[((10883, 10898), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10896, 10898), False, 'import unittest\n'), ((1371, 1395), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(2)', '"""qr"""'], {}), "(2, 'qr')\n", (1386, 1395), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((1409, 1427), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (1423, 1427), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((1533, 1551), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (1547, 1551), False, 'from qiskit.converters import circuit_to_dag\n'), ((1569, 1610), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'force_consolidate': '(True)'}), '(force_consolidate=True)\n', (1586, 1610), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((1741, 1753), 'qiskit.quantum_info.operators.Operator', 'Operator', (['qc'], {}), '(qc)\n', (1749, 1753), False, 'from qiskit.quantum_info.operators import Operator\n'), ((2060, 2084), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(2)', '"""qr"""'], {}), "(2, 'qr')\n", (2075, 2084), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((2098, 2116), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (2112, 2116), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((2159, 2177), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (2173, 2177), False, 'from qiskit.converters import circuit_to_dag\n'), ((2195, 2236), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'force_consolidate': '(True)'}), '(force_consolidate=True)\n', (2212, 2236), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((2447, 2459), 'qiskit.quantum_info.operators.Operator', 'Operator', (['qc'], {}), '(qc)\n', (2455, 2459), False, 'from qiskit.quantum_info.operators import Operator\n'), ((3077, 3101), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(3)', '"""qr"""'], {}), "(3, 'qr')\n", (3092, 3101), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((3115, 3133), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (3129, 3133), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((3267, 3285), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (3281, 3285), False, 'from qiskit.converters import circuit_to_dag\n'), ((3303, 3344), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'force_consolidate': '(True)'}), '(force_consolidate=True)\n', (3320, 3344), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((3936, 3960), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(3)', '"""qr"""'], {}), "(3, 'qr')\n", (3951, 3960), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((3974, 3992), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (3988, 3992), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((4126, 4144), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (4140, 4144), False, 'from qiskit.converters import circuit_to_dag\n'), ((4162, 4203), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'force_consolidate': '(True)'}), '(force_consolidate=True)\n', (4179, 4203), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((4334, 4346), 
'qiskit.quantum_info.operators.Operator', 'Operator', (['qc'], {}), '(qc)\n', (4342, 4346), False, 'from qiskit.quantum_info.operators import Operator\n'), ((4669, 4694), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(1)', '"""qr0"""'], {}), "(1, 'qr0')\n", (4684, 4694), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((4709, 4734), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(1)', '"""qr1"""'], {}), "(1, 'qr1')\n", (4724, 4734), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((4748, 4772), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr0', 'qr1'], {}), '(qr0, qr1)\n', (4762, 4772), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((4882, 4900), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (4896, 4900), False, 'from qiskit.converters import circuit_to_dag\n'), ((4918, 4959), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'force_consolidate': '(True)'}), '(force_consolidate=True)\n', (4935, 4959), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((5090, 5102), 'qiskit.quantum_info.operators.Operator', 'Operator', (['qc'], {}), '(qc)\n', (5098, 5102), False, 'from qiskit.quantum_info.operators import Operator\n'), ((5622, 5647), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(1)', '"""qr0"""'], {}), "(1, 'qr0')\n", (5637, 5647), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((5662, 5687), 'qiskit.circuit.QuantumRegister', 'QuantumRegister', (['(2)', '"""qr1"""'], {}), "(2, 'qr1')\n", (5677, 5687), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((5701, 5725), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['qr0', 'qr1'], {}), '(qr0, qr1)\n', (5715, 5725), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((5770, 5788), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (5784, 5788), False, 'from qiskit.converters import circuit_to_dag\n'), ((5806, 5847), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'force_consolidate': '(True)'}), '(force_consolidate=True)\n', (5823, 5847), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((6748, 6768), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(2)', '(1)'], {}), '(2, 1)\n', (6762, 6768), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((7071, 7084), 'qiskit.transpiler.PassManager', 'PassManager', ([], {}), '()\n', (7082, 7084), False, 'from qiskit.transpiler import PassManager\n'), ((7670, 7687), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (7684, 7687), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((7866, 7879), 'qiskit.transpiler.PassManager', 'PassManager', ([], {}), '()\n', (7877, 7879), False, 'from qiskit.transpiler import PassManager\n'), ((8702, 8719), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(3)'], {}), '(3)\n', (8716, 8719), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((8816, 8829), 'qiskit.transpiler.PassManager', 'PassManager', ([], {}), '()\n', (8827, 8829), False, 'from qiskit.transpiler import PassManager\n'), ((9631, 9648), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(4)'], {}), '(4)\n', (9645, 9648), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((9841, 9854), 'qiskit.transpiler.PassManager', 'PassManager', ([], {}), '()\n', (9852, 9854), 
False, 'from qiskit.transpiler import PassManager\n'), ((10216, 10236), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)', '(1)'], {}), '(1, 1)\n', (10230, 10236), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((10323, 10336), 'qiskit.transpiler.PassManager', 'PassManager', ([], {}), '()\n', (10334, 10336), False, 'from qiskit.transpiler import PassManager\n'), ((10629, 10646), 'qiskit.circuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (10643, 10646), False, 'from qiskit.circuit import QuantumCircuit, QuantumRegister\n'), ((10677, 10695), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['qc'], {}), '(qc)\n', (10691, 10695), False, 'from qiskit.converters import circuit_to_dag\n'), ((10730, 10767), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {'basis_gates': "['u3']"}), "(basis_gates=['u3'])\n", (10747, 10767), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((2496, 2517), 'qiskit.quantum_info.operators.Operator', 'Operator', (['new_node.op'], {}), '(new_node.op)\n', (2504, 2517), False, 'from qiskit.quantum_info.operators import Operator\n'), ((5999, 6011), 'qiskit.quantum_info.operators.Operator', 'Operator', (['qc'], {}), '(qc)\n', (6007, 6011), False, 'from qiskit.quantum_info.operators import Operator\n'), ((7113, 7130), 'qiskit.transpiler.passes.Collect2qBlocks', 'Collect2qBlocks', ([], {}), '()\n', (7128, 7130), False, 'from qiskit.transpiler.passes import Collect2qBlocks\n'), ((7160, 7179), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {}), '()\n', (7177, 7179), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((7711, 7733), 'qiskit.circuit.library.U2Gate', 'U2Gate', (['(-804.15)', 'np.pi'], {}), '(-804.15, np.pi)\n', (7717, 7733), False, 'from qiskit.circuit.library import U2Gate\n'), ((7763, 7785), 'qiskit.circuit.library.U2Gate', 'U2Gate', (['(-6433.2)', 'np.pi'], {}), '(-6433.2, np.pi)\n', (7769, 7785), False, 'from qiskit.circuit.library import U2Gate\n'), ((7908, 7925), 'qiskit.transpiler.passes.Collect2qBlocks', 'Collect2qBlocks', ([], {}), '()\n', (7923, 7925), False, 'from qiskit.transpiler.passes import Collect2qBlocks\n'), ((7955, 7974), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {}), '()\n', (7972, 7974), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((8858, 8875), 'qiskit.transpiler.passes.Collect2qBlocks', 'Collect2qBlocks', ([], {}), '()\n', (8873, 8875), False, 'from qiskit.transpiler.passes import Collect2qBlocks\n'), ((8905, 8924), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {}), '()\n', (8922, 8924), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((9883, 9900), 'qiskit.transpiler.passes.Collect2qBlocks', 'Collect2qBlocks', ([], {}), '()\n', (9898, 9900), False, 'from qiskit.transpiler.passes import Collect2qBlocks\n'), ((9930, 9949), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {}), '()\n', (9947, 9949), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((10365, 10382), 'qiskit.transpiler.passes.Collect2qBlocks', 'Collect2qBlocks', ([], {}), '()\n', (10380, 10382), False, 'from qiskit.transpiler.passes import Collect2qBlocks\n'), ((10412, 10431), 'qiskit.transpiler.passes.ConsolidateBlocks', 'ConsolidateBlocks', ([], {}), '()\n', (10429, 10431), False, 'from qiskit.transpiler.passes import ConsolidateBlocks\n'), ((6110, 6133), 'qiskit.converters.dag_to_circuit', 
'dag_to_circuit', (['new_dag'], {}), '(new_dag)\n', (6124, 6133), False, 'from qiskit.converters import dag_to_circuit\n')]
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
@app.route('/home/')
def home():
# Connecting to a template (html file)
return render_template('home.html')
@app.route('/puppy/<name>')
def pup_name(name):
return render_template('puppy.html',name=name)
@app.route('/about/')
def about():
return render_template('about.html')
if __name__ == '__main__':
app.run(debug=True)
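# Hedged check (not part of the original app): the <name> segment of '/puppy/<name>' is
# passed straight to pup_name(), so the route can be exercised without a browser via
# Flask's built-in test client. 'Rex' is an arbitrary example value, and a 200 status
# assumes templates/puppy.html is present.
#   with app.test_client() as client:
#       assert client.get('/puppy/Rex').status_code == 200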
|
[
"flask.Flask",
"flask.render_template"
] |
[((48, 63), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (53, 63), False, 'from flask import Flask, render_template\n'), ((166, 194), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (181, 194), False, 'from flask import Flask, render_template\n'), ((252, 292), 'flask.render_template', 'render_template', (['"""puppy.html"""'], {'name': 'name'}), "('puppy.html', name=name)\n", (267, 292), False, 'from flask import Flask, render_template\n'), ((338, 367), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (353, 367), False, 'from flask import Flask, render_template\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for convert_video_to_dataset."""
from absl.testing import absltest
import tensorflow as tf
from uflow.data import generic_flow_dataset
from uflow.misc import convert_video_to_dataset
class ConvertVideoToDatasetTest(absltest.TestCase):
def test_video_parsing(self):
"""Test that we can convert a video to a dataset and load it correctly."""
filepath = 'uflow/files/billiard_clip.mp4'
output_dir = '/tmp/dataset'
convert_video_to_dataset.convert_video(
video_file_path=filepath,
output_folder=output_dir)
dataset = generic_flow_dataset.make_dataset(path=output_dir, mode='test')
data_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
count = 0
for element in data_iterator:
image1, image2 = element
count += 1
self.assertEqual(image1.shape[0], image2.shape[0])
self.assertEqual(image1.shape[1], image2.shape[1])
self.assertEqual(image1.shape[2], 3)
self.assertEqual(count, 299)
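    # Hedged standalone sketch (not part of the test): the same conversion can be run
    # outside the test, assuming the signatures used above; the paths are placeholders.
    #   convert_video_to_dataset.convert_video(
    #       video_file_path='my_clip.mp4', output_folder='/tmp/my_dataset')
    #   dataset = generic_flow_dataset.make_dataset(path='/tmp/my_dataset', mode='test')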
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"uflow.data.generic_flow_dataset.make_dataset",
"uflow.misc.convert_video_to_dataset.convert_video",
"tensorflow.compat.v1.data.make_one_shot_iterator"
] |
[((1623, 1638), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (1636, 1638), False, 'from absl.testing import absltest\n'), ((1051, 1145), 'uflow.misc.convert_video_to_dataset.convert_video', 'convert_video_to_dataset.convert_video', ([], {'video_file_path': 'filepath', 'output_folder': 'output_dir'}), '(video_file_path=filepath,\n output_folder=output_dir)\n', (1089, 1145), False, 'from uflow.misc import convert_video_to_dataset\n'), ((1173, 1236), 'uflow.data.generic_flow_dataset.make_dataset', 'generic_flow_dataset.make_dataset', ([], {'path': 'output_dir', 'mode': '"""test"""'}), "(path=output_dir, mode='test')\n", (1206, 1236), False, 'from uflow.data import generic_flow_dataset\n'), ((1257, 1306), 'tensorflow.compat.v1.data.make_one_shot_iterator', 'tf.compat.v1.data.make_one_shot_iterator', (['dataset'], {}), '(dataset)\n', (1297, 1306), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the QueryBuilder."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import unittest
import warnings
from six.moves import range, zip
from aiida import orm
from aiida.backends import settings
from aiida.backends.testbase import AiidaTestCase
from aiida.common.links import LinkType
# pylint: disable=invalid-name,missing-docstring,too-many-lines
class TestQueryBuilder(AiidaTestCase):
def setUp(self):
super(TestQueryBuilder, self).setUp()
self.clean_db()
self.insert_data()
def test_ormclass_type_classification(self):
"""
This tests the classifications of the QueryBuilder
"""
# pylint: disable=protected-access
from aiida.common.exceptions import DbContentError
qb = orm.QueryBuilder()
# Asserting that improper declarations of the class type raise an error
with self.assertRaises(DbContentError):
qb._get_ormclass(None, 'data')
with self.assertRaises(DbContentError):
qb._get_ormclass(None, 'data.Data')
with self.assertRaises(DbContentError):
qb._get_ormclass(None, '.')
# Asserting that the query type string and plugin type string are returned:
for _cls, classifiers in (
qb._get_ormclass(orm.StructureData, None),
qb._get_ormclass(None, 'data.structure.StructureData.'),
):
self.assertEqual(classifiers['ormclass_type_string'], orm.StructureData._plugin_type_string) # pylint: disable=no-member
for _cls, classifiers in (
qb._get_ormclass(orm.Group, None),
qb._get_ormclass(None, 'group'),
qb._get_ormclass(None, 'Group'),
):
self.assertEqual(classifiers['ormclass_type_string'], 'group')
for _cls, classifiers in (
qb._get_ormclass(orm.User, None),
qb._get_ormclass(None, "user"),
qb._get_ormclass(None, "User"),
):
self.assertEqual(classifiers['ormclass_type_string'], 'user')
for _cls, classifiers in (
qb._get_ormclass(orm.Computer, None),
qb._get_ormclass(None, 'computer'),
qb._get_ormclass(None, 'Computer'),
):
self.assertEqual(classifiers['ormclass_type_string'], 'computer')
for _cls, classifiers in (
qb._get_ormclass(orm.Data, None),
qb._get_ormclass(None, 'data.Data.'),
):
self.assertEqual(classifiers['ormclass_type_string'], orm.Data._plugin_type_string) # pylint: disable=no-member
def test_process_type_classification(self):
"""
This tests the classifications of the QueryBuilder
"""
from aiida.engine import WorkChain
from aiida.plugins import CalculationFactory
ArithmeticAdd = CalculationFactory('arithmetic.add')
qb = orm.QueryBuilder()
# pylint: disable=protected-access
# When passing a WorkChain class, it should return the type of the corresponding Node
# including the appropriate filter on the process_type
_cls, classifiers = qb._get_ormclass(WorkChain, None)
self.assertEqual(classifiers['ormclass_type_string'], 'process.workflow.workchain.WorkChainNode.')
self.assertEqual(classifiers['process_type_string'], 'aiida.engine.processes.workchains.workchain.WorkChain')
# When passing a WorkChainNode, no process_type filter is applied
_cls, classifiers = qb._get_ormclass(orm.WorkChainNode, None)
self.assertEqual(classifiers['ormclass_type_string'], 'process.workflow.workchain.WorkChainNode.')
self.assertEqual(classifiers['process_type_string'], None)
# Same tests for a calculation
_cls, classifiers = qb._get_ormclass(ArithmeticAdd, None)
self.assertEqual(classifiers['ormclass_type_string'], 'process.calculation.calcjob.CalcJobNode.')
self.assertEqual(classifiers['process_type_string'], 'aiida.calculations:arithmetic.add')
def test_process_query(self):
"""
Test querying for a process class.
"""
from aiida.engine import run, WorkChain, if_, return_, ExitCode
from aiida.common.warnings import AiidaEntryPointWarning
class PotentialFailureWorkChain(WorkChain):
EXIT_STATUS = 1
EXIT_MESSAGE = 'Well you did ask for it'
OUTPUT_LABEL = 'optional_output'
OUTPUT_VALUE = 144
@classmethod
def define(cls, spec):
super(PotentialFailureWorkChain, cls).define(spec)
spec.input('success', valid_type=orm.Bool)
spec.input('through_return', valid_type=orm.Bool, default=orm.Bool(False))
spec.input('through_exit_code', valid_type=orm.Bool, default=orm.Bool(False))
spec.exit_code(cls.EXIT_STATUS, 'EXIT_STATUS', cls.EXIT_MESSAGE)
spec.outline(if_(cls.should_return_out_of_outline)(return_(cls.EXIT_STATUS)), cls.failure, cls.success)
spec.output('optional', required=False)
def should_return_out_of_outline(self):
return self.inputs.through_return.value
def failure(self):
# pylint: disable=no-else-return
if self.inputs.success.value is False:
# Returning either 0 or ExitCode with non-zero status should terminate the workchain
if self.inputs.through_exit_code.value is False:
return self.EXIT_STATUS
else:
return self.exit_codes.EXIT_STATUS # pylint: disable=no-member
else:
# Returning 0 or ExitCode with zero status should *not* terminate the workchain
if self.inputs.through_exit_code.value is False:
return 0
else:
return ExitCode()
def success(self):
self.out(self.OUTPUT_LABEL, orm.Int(self.OUTPUT_VALUE).store())
class DummyWorkChain(WorkChain):
pass
# Run a simple test WorkChain
_result = run(PotentialFailureWorkChain, success=orm.Bool(True))
# Query for nodes associated with this type of WorkChain
qb = orm.QueryBuilder()
with warnings.catch_warnings(record=True) as w: # pylint: disable=no-member
# Cause all warnings to always be triggered.
warnings.simplefilter("always") # pylint: disable=no-member
qb.append(PotentialFailureWorkChain)
# Verify some things
assert len(w) == 1
assert issubclass(w[-1].category, AiidaEntryPointWarning)
# There should be one result of type WorkChainNode
self.assertEqual(qb.count(), 1)
self.assertTrue(isinstance(qb.all()[0][0], orm.WorkChainNode))
# Query for nodes of a different type of WorkChain
qb = orm.QueryBuilder()
with warnings.catch_warnings(record=True) as w: # pylint: disable=no-member
# Cause all warnings to always be triggered.
warnings.simplefilter("always") # pylint: disable=no-member
qb.append(DummyWorkChain)
# Verify some things
assert len(w) == 1
assert issubclass(w[-1].category, AiidaEntryPointWarning)
# There should be no result
self.assertEqual(qb.count(), 0)
# Query for all WorkChain nodes
qb = orm.QueryBuilder()
qb.append(WorkChain)
# There should be one result
self.assertEqual(qb.count(), 1)
def test_simple_query_1(self):
"""
Testing a simple query
"""
# pylint: disable=too-many-statements
n1 = orm.Data()
n1.label = 'node1'
n1.set_attribute('foo', ['hello', 'goodbye'])
n1.store()
n2 = orm.CalculationNode()
n2.label = 'node2'
n2.set_attribute('foo', 1)
n2.store()
n3 = orm.Data()
n3.label = 'node3'
n3.set_attribute('foo', 1.0000) # Stored as fval
n3.store()
n4 = orm.CalculationNode()
n4.label = 'node4'
n4.set_attribute('foo', 'bar')
n4.store()
n5 = orm.Data()
n5.label = 'node5'
n5.set_attribute('foo', None)
n5.store()
n2.add_incoming(n1, link_type=LinkType.INPUT_CALC, link_label='link1')
n3.add_incoming(n2, link_type=LinkType.CREATE, link_label='link2')
n4.add_incoming(n3, link_type=LinkType.INPUT_CALC, link_label='link3')
n5.add_incoming(n4, link_type=LinkType.CREATE, link_label='link4')
qb1 = orm.QueryBuilder()
qb1.append(orm.Node, filters={'attributes.foo': 1.000})
self.assertEqual(len(qb1.all()), 2)
qb2 = orm.QueryBuilder()
qb2.append(orm.Data)
self.assertEqual(qb2.count(), 3)
qb2 = orm.QueryBuilder()
qb2.append(entity_type='data.Data.')
self.assertEqual(qb2.count(), 3)
qb3 = orm.QueryBuilder()
qb3.append(orm.Node, project='label', tag='node1')
qb3.append(orm.Node, project='label', tag='node2')
self.assertEqual(qb3.count(), 4)
qb4 = orm.QueryBuilder()
qb4.append(orm.CalculationNode, tag='node1')
qb4.append(orm.Data, tag='node2')
self.assertEqual(qb4.count(), 2)
qb5 = orm.QueryBuilder()
qb5.append(orm.Data, tag='node1')
qb5.append(orm.CalculationNode, tag='node2')
self.assertEqual(qb5.count(), 2)
qb6 = orm.QueryBuilder()
qb6.append(orm.Data, tag='node1')
qb6.append(orm.Data, tag='node2')
self.assertEqual(qb6.count(), 0)
def test_simple_query_2(self):
from datetime import datetime
from aiida.common.exceptions import MultipleObjectsError, NotExistent
n0 = orm.Data()
n0.label = 'hello'
n0.description = ''
n0.set_attribute('foo', 'bar')
n1 = orm.CalculationNode()
n1.label = 'foo'
n1.description = 'I am FoO'
n2 = orm.Data()
n2.label = 'bar'
n2.description = 'I am BaR'
n2.add_incoming(n1, link_type=LinkType.CREATE, link_label='random_2')
n1.add_incoming(n0, link_type=LinkType.INPUT_CALC, link_label='random_1')
for n in (n0, n1, n2):
n.store()
qb1 = orm.QueryBuilder()
qb1.append(orm.Node, filters={'label': 'hello'})
self.assertEqual(len(list(qb1.all())), 1)
qh = {
'path': [{
'cls': orm.Node,
'tag': 'n1'
}, {
'cls': orm.Node,
'tag': 'n2',
'with_incoming': 'n1'
}],
'filters': {
'n1': {
'label': {
'ilike': '%foO%'
},
},
'n2': {
'label': {
'ilike': 'bar%'
},
}
},
'project': {
'n1': ['id', 'uuid', 'ctime', 'label'],
'n2': ['id', 'description', 'label'],
}
}
qb2 = orm.QueryBuilder(**qh)
resdict = qb2.dict()
self.assertEqual(len(resdict), 1)
self.assertTrue(isinstance(resdict[0]['n1']['ctime'], datetime))
res_one = qb2.one()
self.assertTrue('bar' in res_one)
qh = {
'path': [{
'cls': orm.Node,
'tag': 'n1'
}, {
'cls': orm.Node,
'tag': 'n2',
'with_incoming': 'n1'
}],
'filters': {
'n1--n2': {
'label': {
'like': '%_2'
}
}
}
}
qb = orm.QueryBuilder(**qh)
self.assertEqual(qb.count(), 1)
# Test the hashing:
query1 = qb.get_query()
qb.add_filter('n2', {'label': 'nonexistentlabel'})
self.assertEqual(qb.count(), 0)
with self.assertRaises(NotExistent):
qb.one()
with self.assertRaises(MultipleObjectsError):
orm.QueryBuilder().append(orm.Node).one()
query2 = qb.get_query()
query3 = qb.get_query()
self.assertTrue(id(query1) != id(query2))
self.assertTrue(id(query2) == id(query3))
def test_operators_eq_lt_gt(self):
nodes = [orm.Data() for _ in range(8)]
nodes[0].set_attribute('fa', 1)
nodes[1].set_attribute('fa', 1.0)
nodes[2].set_attribute('fa', 1.01)
nodes[3].set_attribute('fa', 1.02)
nodes[4].set_attribute('fa', 1.03)
nodes[5].set_attribute('fa', 1.04)
nodes[6].set_attribute('fa', 1.05)
nodes[7].set_attribute('fa', 1.06)
for n in nodes:
n.store()
self.assertEqual(orm.QueryBuilder().append(orm.Node, filters={'attributes.fa': {'<': 1}}).count(), 0)
self.assertEqual(orm.QueryBuilder().append(orm.Node, filters={'attributes.fa': {'==': 1}}).count(), 2)
self.assertEqual(orm.QueryBuilder().append(orm.Node, filters={'attributes.fa': {'<': 1.02}}).count(), 3)
self.assertEqual(orm.QueryBuilder().append(orm.Node, filters={'attributes.fa': {'<=': 1.02}}).count(), 4)
self.assertEqual(orm.QueryBuilder().append(orm.Node, filters={'attributes.fa': {'>': 1.02}}).count(), 4)
self.assertEqual(orm.QueryBuilder().append(orm.Node, filters={'attributes.fa': {'>=': 1.02}}).count(), 5)
def test_subclassing(self):
s = orm.StructureData()
s.set_attribute('cat', 'miau')
s.store()
d = orm.Data()
d.set_attribute('cat', 'miau')
d.store()
p = orm.Dict(dict=dict(cat='miau'))
p.store()
        # Now when asking for a node with attr.cat==miau, I want 3 results:
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.cat': 'miau'})
self.assertEqual(qb.count(), 3)
qb = orm.QueryBuilder().append(orm.Data, filters={'attributes.cat': 'miau'})
self.assertEqual(qb.count(), 3)
# If I'm asking for the specific lowest subclass, I want one result
for cls in (orm.StructureData, orm.Dict):
qb = orm.QueryBuilder().append(cls, filters={'attributes.cat': 'miau'})
self.assertEqual(qb.count(), 1)
        # Now I disallow subclassing, which should give 1 result for each exact class (and 0 for plain Node)
for cls, count in ((orm.StructureData, 1), (orm.Dict, 1), (orm.Data, 1), (orm.Node, 0)):
qb = orm.QueryBuilder().append(cls, filters={'attributes.cat': 'miau'}, subclassing=False)
self.assertEqual(qb.count(), count)
# Now I am testing the subclassing with tuples:
qb = orm.QueryBuilder().append(cls=(orm.StructureData, orm.Dict), filters={'attributes.cat': 'miau'})
self.assertEqual(qb.count(), 2)
qb = orm.QueryBuilder().append(
entity_type=('data.structure.StructureData.', 'data.dict.Dict.'), filters={'attributes.cat': 'miau'})
self.assertEqual(qb.count(), 2)
qb = orm.QueryBuilder().append(
cls=(orm.StructureData, orm.Dict), filters={'attributes.cat': 'miau'}, subclassing=False)
self.assertEqual(qb.count(), 2)
qb = orm.QueryBuilder().append(
cls=(orm.StructureData, orm.Data),
filters={'attributes.cat': 'miau'},
)
self.assertEqual(qb.count(), 3)
qb = orm.QueryBuilder().append(
entity_type=('data.structure.StructureData.', 'data.dict.Dict.'),
filters={'attributes.cat': 'miau'},
subclassing=False)
self.assertEqual(qb.count(), 2)
qb = orm.QueryBuilder().append(
entity_type=('data.structure.StructureData.', 'data.Data.'),
filters={'attributes.cat': 'miau'},
subclassing=False)
self.assertEqual(qb.count(), 2)
def test_list_behavior(self):
for _i in range(4):
orm.Data().store()
self.assertEqual(len(orm.QueryBuilder().append(orm.Node).all()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node, project='*').all()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node, project=['*', 'id']).all()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node, project=['id']).all()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node).dict()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node, project='*').dict()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node, project=['*', 'id']).dict()), 4)
self.assertEqual(len(orm.QueryBuilder().append(orm.Node, project=['id']).dict()), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node).iterall())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node, project='*').iterall())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node, project=['*', 'id']).iterall())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node, project=['id']).iterall())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node).iterdict())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node, project='*').iterdict())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node, project=['*', 'id']).iterdict())), 4)
self.assertEqual(len(list(orm.QueryBuilder().append(orm.Node, project=['id']).iterdict())), 4)
def test_append_validation(self):
from aiida.common.exceptions import InputValidationError
# So here I am giving two times the same tag
with self.assertRaises(InputValidationError):
orm.QueryBuilder().append(orm.StructureData, tag='n').append(orm.StructureData, tag='n')
# here I am giving a wrong filter specifications
with self.assertRaises(InputValidationError):
orm.QueryBuilder().append(orm.StructureData, filters=['jajjsd'])
# here I am giving a nonsensical projection:
with self.assertRaises(InputValidationError):
orm.QueryBuilder().append(orm.StructureData, project=True)
# here I am giving a nonsensical projection for the edge:
with self.assertRaises(InputValidationError):
orm.QueryBuilder().append(orm.ProcessNode).append(orm.StructureData, edge_tag='t').add_projection('t', True)
# Giving a nonsensical limit
with self.assertRaises(InputValidationError):
orm.QueryBuilder().append(orm.ProcessNode).limit(2.3)
# Giving a nonsensical offset
with self.assertRaises(InputValidationError):
orm.QueryBuilder(offset=2.3)
# So, I mess up one append, I want the QueryBuilder to clean it!
with self.assertRaises(InputValidationError):
qb = orm.QueryBuilder()
# This also checks if we correctly raise for wrong keywords
qb.append(orm.StructureData, tag='s', randomkeyword={})
# Now I'm checking whether this keyword appears anywhere in the internal dictionaries:
# pylint: disable=protected-access
self.assertTrue('s' not in qb._projections)
self.assertTrue('s' not in qb._filters)
self.assertTrue('s' not in qb._tag_to_alias_map)
self.assertTrue(len(qb._path) == 0)
self.assertTrue(orm.StructureData not in qb._cls_to_tag_map)
# So this should work now:
qb.append(orm.StructureData, tag='s').limit(2).dict()
def test_tags(self):
qb = orm.QueryBuilder()
qb.append(orm.Node, tag='n1')
qb.append(orm.Node, tag='n2', edge_tag='e1', with_incoming='n1')
qb.append(orm.Node, tag='n3', edge_tag='e2', with_incoming='n2')
qb.append(orm.Computer, with_node='n3', tag='c1', edge_tag='nonsense')
self.assertEqual(qb.get_used_tags(), ['n1', 'n2', 'e1', 'n3', 'e2', 'c1', 'nonsense'])
# Now I am testing the default tags,
qb = orm.QueryBuilder().append(orm.StructureData).append(orm.ProcessNode).append(orm.StructureData).append(
orm.Dict, with_outgoing=orm.ProcessNode)
self.assertEqual(qb.get_used_tags(), [
'StructureData_1', 'ProcessNode_1', 'StructureData_1--ProcessNode_1', 'StructureData_2',
'ProcessNode_1--StructureData_2', 'Dict_1', 'ProcessNode_1--Dict_1'
])
self.assertEqual(
qb.get_used_tags(edges=False), [
'StructureData_1',
'ProcessNode_1',
'StructureData_2',
'Dict_1',
])
self.assertEqual(
qb.get_used_tags(vertices=False),
['StructureData_1--ProcessNode_1', 'ProcessNode_1--StructureData_2', 'ProcessNode_1--Dict_1'])
self.assertEqual(
qb.get_used_tags(edges=False), [
'StructureData_1',
'ProcessNode_1',
'StructureData_2',
'Dict_1',
])
self.assertEqual(
qb.get_used_tags(vertices=False),
['StructureData_1--ProcessNode_1', 'ProcessNode_1--StructureData_2', 'ProcessNode_1--Dict_1'])
class TestQueryHelp(AiidaTestCase):
def test_queryhelp(self):
"""
Here I test the queryhelp by seeing whether results are the same as using the append method.
I also check passing of tuples.
"""
g = orm.Group(label='helloworld').store()
for cls in (orm.StructureData, orm.Dict, orm.Data):
obj = cls()
obj.set_attribute('foo-qh2', 'bar')
obj.store()
g.add_nodes(obj)
for cls, expected_count, subclassing in (
(orm.StructureData, 1, True),
(orm.Dict, 1, True),
(orm.Data, 3, True),
(orm.Data, 1, False),
((orm.Dict, orm.StructureData), 2, True),
((orm.Dict, orm.StructureData), 2, False),
((orm.Dict, orm.Data), 2, False),
((orm.Dict, orm.Data), 3, True),
((orm.Dict, orm.Data, orm.StructureData), 3, False),
):
qb = orm.QueryBuilder()
qb.append(cls, filters={'attributes.foo-qh2': 'bar'}, subclassing=subclassing, project='uuid')
self.assertEqual(qb.count(), expected_count)
qh = qb.get_json_compatible_queryhelp()
qb_new = orm.QueryBuilder(**qh)
self.assertEqual(qb_new.count(), expected_count)
self.assertEqual(sorted([uuid for uuid, in qb.all()]), sorted([uuid for uuid, in qb_new.all()]))
qb = orm.QueryBuilder().append(orm.Group, filters={'label': 'helloworld'})
self.assertEqual(qb.count(), 1)
qb = orm.QueryBuilder().append((orm.Group,), filters={'label': 'helloworld'})
self.assertEqual(qb.count(), 1)
qb = orm.QueryBuilder().append(orm.Computer,)
self.assertEqual(qb.count(), 1)
qb = orm.QueryBuilder().append(cls=(orm.Computer,))
self.assertEqual(qb.count(), 1)
class TestQueryBuilderCornerCases(AiidaTestCase):
"""
In this class corner cases of QueryBuilder are added.
"""
def test_computer_json(self): # pylint: disable=no-self-use
"""
In this test we check the correct behavior of QueryBuilder when
retrieving the _metadata and the transport_params with no content.
Note that they are in JSON format in both backends. Forcing the
decoding of a None value leads to an exception (this was the case
under Django).
"""
n1 = orm.CalculationNode()
n1.label = 'node2'
n1.set_attribute('foo', 1)
n1.store()
# Checking the correct retrieval of transport_params which is
# a JSON field (in both backends).
qb = orm.QueryBuilder()
qb.append(orm.CalculationNode, project=['id'], tag='calc')
qb.append(orm.Computer, project=['id', 'transport_params'], outerjoin=True, with_node='calc')
qb.all()
# Checking the correct retrieval of _metadata which is
# a JSON field (in both backends).
qb = orm.QueryBuilder()
qb.append(orm.CalculationNode, project=['id'], tag='calc')
qb.append(orm.Computer, project=['id', '_metadata'], outerjoin=True, with_node='calc')
qb.all()
class TestAttributes(AiidaTestCase):
def test_attribute_existence(self):
# I'm storing a value under key whatever:
val = 1.
res_uuids = set()
n1 = orm.Data()
n1.set_attribute("whatever", 3.)
n1.set_attribute("test_case", "test_attribute_existence")
n1.store()
# I want all the nodes where whatever is smaller than 1. or there is no such value:
qb = orm.QueryBuilder()
qb.append(
orm.Data,
filters={
'or': [{
'attributes': {
'!has_key': 'whatever'
}
}, {
'attributes.whatever': {
'<': val
}
}],
},
project='uuid')
res_query = {str(_[0]) for _ in qb.all()}
self.assertEqual(res_query, res_uuids)
def test_attribute_type(self):
key = 'value_test_attr_type'
n_int, n_float, n_str, n_str2, n_bool, n_arr = [orm.Data() for _ in range(6)]
n_int.set_attribute(key, 1)
n_float.set_attribute(key, 1.0)
n_bool.set_attribute(key, True)
n_str.set_attribute(key, '1')
n_str2.set_attribute(key, 'one')
n_arr.set_attribute(key, [4, 3, 5])
for n in (n_str2, n_str, n_int, n_float, n_bool, n_arr):
n.store()
# Here I am testing which values contain a number 1.
# Both 1 and 1.0 are legitimate values if ask for either 1 or 1.0
for val in (1.0, 1):
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): val}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_float.uuid, n_int.uuid)))
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): {'>': 0.5}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_float.uuid, n_int.uuid)))
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): {'<': 1.5}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_float.uuid, n_int.uuid)))
# Now I am testing the boolean value:
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): True}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_bool.uuid,)))
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): {'like': '%n%'}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_str2.uuid,)))
qb = orm.QueryBuilder().append(
orm.Node, filters={'attributes.{}'.format(key): {
'ilike': 'On%'
}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_str2.uuid,)))
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): {'like': '1'}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_str.uuid,)))
qb = orm.QueryBuilder().append(orm.Node, filters={'attributes.{}'.format(key): {'==': '1'}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_str.uuid,)))
if settings.BACKEND == u'sqlalchemy':
# I can't query the length of an array with Django,
            # so I exclude. Not the nicest way, but I would like to keep this piece
# of code because of the initialization part, that would need to be
# duplicated or wrapped otherwise.
qb = orm.QueryBuilder().append(
orm.Node, filters={'attributes.{}'.format(key): {
'of_length': 3
}}, project='uuid')
res = [str(_) for _, in qb.all()]
self.assertEqual(set(res), set((n_arr.uuid,)))
class QueryBuilderDateTimeAttribute(AiidaTestCase):
@unittest.skipIf(settings.BACKEND == u'sqlalchemy', "SQLA doesn't have full datetime support in attributes")
def test_date(self):
from aiida.common import timezone
from datetime import timedelta
n = orm.Data()
now = timezone.now()
n.set_attribute('now', now)
n.store()
qb = orm.QueryBuilder().append(
orm.Node,
filters={
'attributes.now': {
"and": [
{
">": now - timedelta(seconds=1)
},
{
"<": now + timedelta(seconds=1)
},
]
}
})
self.assertEqual(qb.count(), 1)
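# Ordering, limit and offset behaviour of QueryBuilder results.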
class QueryBuilderLimitOffsetsTest(AiidaTestCase):
def test_ordering_limits_offsets_of_results_general(self):
# Creating 10 nodes with an attribute that can be ordered
for i in range(10):
n = orm.Data()
n.set_attribute('foo', i)
n.store()
qb = orm.QueryBuilder().append(orm.Node, project='attributes.foo').order_by({orm.Node: 'ctime'})
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(10)))
# Now applying an offset:
qb.offset(5)
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(5, 10)))
# Now also applying a limit:
qb.limit(3)
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(5, 8)))
        # Specifying the order explicitly:
qb = orm.QueryBuilder().append(
orm.Node, project='attributes.foo').order_by({orm.Node: {
'ctime': {
'order': 'asc'
}
}})
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(10)))
# Now applying an offset:
qb.offset(5)
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(5, 10)))
# Now also applying a limit:
qb.limit(3)
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(5, 8)))
# Reversing the order:
qb = orm.QueryBuilder().append(
orm.Node, project='attributes.foo').order_by({orm.Node: {
'ctime': {
'order': 'desc'
}
}})
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(9, -1, -1)))
# Now applying an offset:
qb.offset(5)
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(4, -1, -1)))
# Now also applying a limit:
qb.limit(3)
res = next(zip(*qb.all()))
self.assertEqual(res, tuple(range(4, 1, -1)))
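# Join behaviour: inner vs. outer joins, edge filters on link labels,
# and joining users with groups.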
class QueryBuilderJoinsTests(AiidaTestCase):
def test_joins1(self):
        # Creating a parent node, two calculation children (one good, one bad) and an unrelated node:
parent = orm.Data()
parent.label = 'mother'
good_child = orm.CalculationNode()
good_child.label = 'good_child'
good_child.set_attribute('is_good', True)
bad_child = orm.CalculationNode()
bad_child.label = 'bad_child'
bad_child.set_attribute('is_good', False)
unrelated = orm.CalculationNode()
unrelated.label = 'unrelated'
for n in (good_child, bad_child, parent, unrelated):
n.store()
good_child.add_incoming(parent, link_type=LinkType.INPUT_CALC, link_label='parent')
bad_child.add_incoming(parent, link_type=LinkType.INPUT_CALC, link_label='parent')
# Using a standard inner join
qb = orm.QueryBuilder()
qb.append(orm.Node, tag='parent')
qb.append(orm.Node, tag='children', project='label', filters={'attributes.is_good': True})
self.assertEqual(qb.count(), 1)
qb = orm.QueryBuilder()
qb.append(orm.Node, tag='parent')
qb.append(orm.Node, tag='children', outerjoin=True, project='label', filters={'attributes.is_good': True})
self.assertEqual(qb.count(), 1)
def test_joins2(self):
        # Creating 10 students and 3 advisors:
students = [orm.Data() for i in range(10)]
advisors = [orm.CalculationNode() for i in range(3)]
for i, a in enumerate(advisors):
a.label = 'advisor {}'.format(i)
a.set_attribute('advisor_id', i)
for n in advisors + students:
n.store()
        # advisor 0 gets students 0 and 1
for i in (0, 1):
students[i].add_incoming(advisors[0], link_type=LinkType.CREATE, link_label='is_advisor_{}'.format(i))
        # advisor 1 gets students 3 and 4
for i in (3, 4):
students[i].add_incoming(advisors[1], link_type=LinkType.CREATE, link_label='is_advisor_{}'.format(i))
        # advisor 2 gets students 5, 6 and 7
for i in (5, 6, 7):
students[i].add_incoming(advisors[2], link_type=LinkType.CREATE, link_label='is_advisor_{}'.format(i))
        # let's add a different relationship than advisor:
students[9].add_incoming(advisors[2], link_type=LinkType.CREATE, link_label='lover')
self.assertEqual(
orm.QueryBuilder().append(orm.Node).append(
orm.Node, edge_filters={
'label': {
'like': 'is\\_advisor\\_%'
}
}, tag='student').count(), 7)
for adv_id, number_students in zip(list(range(3)), (2, 2, 3)):
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'attributes.advisor_id': adv_id
}).append(orm.Node, edge_filters={
'label': {
'like': 'is\\_advisor\\_%'
}
}, tag='student').count(), number_students)
def test_joins3_user_group(self):
# Create another user
new_email = "<EMAIL>"
user = orm.User(email=new_email).store()
# Create a group that belongs to that user
group = orm.Group(label="node_group")
group.user = user
group.store()
# Search for the group of the user
qb = orm.QueryBuilder()
qb.append(orm.User, tag='user', filters={'id': {'==': user.id}})
qb.append(orm.Group, with_user='user', filters={'id': {'==': group.id}})
self.assertEqual(qb.count(), 1, "The expected group that belongs to " "the selected user was not found.")
# Search for the user that owns a group
qb = orm.QueryBuilder()
qb.append(orm.Group, tag='group', filters={'id': {'==': group.id}})
qb.append(orm.User, with_group='group', filters={'id': {'==': user.id}})
self.assertEqual(qb.count(), 1, "The expected user that owns the " "selected group was not found.")
class QueryBuilderPath(AiidaTestCase):
def test_query_path(self):
# pylint: disable=too-many-statements
q = self.backend.query_manager
n1 = orm.Data()
n1.label = 'n1'
n1.store()
n2 = orm.CalculationNode()
n2.label = 'n2'
n2.store()
n3 = orm.Data()
n3.label = 'n3'
n3.store()
n4 = orm.Data()
n4.label = 'n4'
n4.store()
n5 = orm.CalculationNode()
n5.label = 'n5'
n5.store()
n6 = orm.Data()
n6.label = 'n6'
n6.store()
n7 = orm.CalculationNode()
n7.label = 'n7'
n7.store()
n8 = orm.Data()
n8.label = 'n8'
n8.store()
n9 = orm.Data()
n9.label = 'n9'
n9.store()
        # I create a strange graph, inserting links in an order
        # such that I often have to create the transitive closure
        # between two subgraphs
n3.add_incoming(n2, link_type=LinkType.CREATE, link_label='link1')
n2.add_incoming(n1, link_type=LinkType.INPUT_CALC, link_label='link2')
n5.add_incoming(n3, link_type=LinkType.INPUT_CALC, link_label='link3')
n5.add_incoming(n4, link_type=LinkType.INPUT_CALC, link_label='link4')
n4.add_incoming(n2, link_type=LinkType.CREATE, link_label='link5')
n7.add_incoming(n6, link_type=LinkType.INPUT_CALC, link_label='link6')
n8.add_incoming(n7, link_type=LinkType.CREATE, link_label='link7')
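        # Graph built so far (n5 -> n6 and the links involving n9 are added later in this test):
        #     n1 -> n2 -> n3 -> n5
        #            \-> n4 ----^
        #     n6 -> n7 -> n8
        #     n9  (no links yet)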
        # There are no parents of n9, checking that:
self.assertEqual(set([]), set(q.get_all_parents([n9.pk])))
        # There is exactly one parent of n7, namely n6
self.assertEqual({(_,) for _ in (n6.pk,)}, {tuple(_) for _ in q.get_all_parents([n7.pk])})
        # There are several parents of n4
self.assertEqual({(_.pk,) for _ in (n1, n2)}, {tuple(_) for _ in q.get_all_parents([n4.pk])})
        # There are several parents of n5
self.assertEqual({(_.pk,) for _ in (n1, n2, n3, n4)}, {tuple(_) for _ in q.get_all_parents([n5.pk])})
        # Yet, there are no paths from n1 to n8
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n1.pk
}, tag='anc').append(orm.Node, with_ancestors='anc', filters={
'id': n8.pk
}).count(), 0)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(orm.Node, with_descendants='desc', filters={
'id': n1.pk
}).count(), 0)
n6.add_incoming(n5, link_type=LinkType.CREATE, link_label='link1')
        # Now there are 2 paths from n1 to n8
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n1.pk
}, tag='anc').append(orm.Node, with_ancestors='anc', filters={
'id': n8.pk
}).count(), 2)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(orm.Node, with_descendants='desc', filters={
'id': n1.pk
}).count(), 2)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(
orm.Node,
with_descendants='desc',
filters={
'id': n1.pk
},
edge_filters={
'depth': {
'<': 6
}
},
).count(), 2)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(
orm.Node,
with_descendants='desc',
filters={
'id': n1.pk
},
edge_filters={
'depth': 5
},
).count(), 2)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(
orm.Node,
with_descendants='desc',
filters={
'id': n1.pk
},
edge_filters={
'depth': {
'<': 5
}
},
).count(), 0)
# TODO write a query that can filter certain paths by traversed ID # pylint: disable=fixme
qb = orm.QueryBuilder().append(
orm.Node,
filters={
'id': n8.pk
},
tag='desc',
).append(
orm.Node, with_descendants='desc', edge_project='path', filters={'id': n1.pk})
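        # Projecting 'path' on the ancestor/descendant edge returns, for each match,
        # the node pks traversed between the two endpoints (endpoints included),
        # as the expected sets below show.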
queried_path_set = {frozenset(p) for p, in qb.all()}
paths_there_should_be = {
frozenset([n1.pk, n2.pk, n3.pk, n5.pk, n6.pk, n7.pk, n8.pk]),
frozenset([n1.pk, n2.pk, n4.pk, n5.pk, n6.pk, n7.pk, n8.pk])
}
self.assertTrue(queried_path_set == paths_there_should_be)
qb = orm.QueryBuilder().append(
orm.Node, filters={
'id': n1.pk
}, tag='anc').append(
orm.Node, with_ancestors='anc', filters={'id': n8.pk}, edge_project='path')
self.assertEqual({frozenset(p) for p, in qb.all()}, {
frozenset([n1.pk, n2.pk, n3.pk, n5.pk, n6.pk, n7.pk, n8.pk]),
frozenset([n1.pk, n2.pk, n4.pk, n5.pk, n6.pk, n7.pk, n8.pk])
})
n7.add_incoming(n9, link_type=LinkType.INPUT_CALC, link_label='link0')
        # Still two paths from n1 to n8...
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n1.pk
}, tag='anc').append(orm.Node, with_ancestors='anc', filters={
'id': n8.pk
}).count(), 2)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(orm.Node, with_descendants='desc', filters={
'id': n1.pk
}).count(), 2)
n9.add_incoming(n5, link_type=LinkType.CREATE, link_label='link6')
        # And now there should be 4 paths from n1 to n8
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n1.pk
}, tag='anc').append(orm.Node, with_ancestors='anc', filters={
'id': n8.pk
}).count(), 4)
self.assertEqual(
orm.QueryBuilder().append(orm.Node, filters={
'id': n8.pk
}, tag='desc').append(orm.Node, with_descendants='desc', filters={
'id': n1.pk
}).count(), 4)
qb = orm.QueryBuilder().append(
orm.Node, filters={
'id': n1.pk
}, tag='anc').append(
orm.Node, with_ancestors='anc', filters={'id': n8.pk}, edge_tag='edge')
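        # Tagging the ancestor edge as 'edge' lets us project and filter on its 'depth' column below.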
qb.add_projection('edge', 'depth')
self.assertTrue(set(next(zip(*qb.all()))), set([5, 6]))
qb.add_filter('edge', {'depth': 5})
self.assertTrue(set(next(zip(*qb.all()))), set([5]))
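# Consistency of iterall()/all() when nodes are stored during iteration,
# and of len(results) versus count().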
class TestConsistency(AiidaTestCase):
def test_create_node_and_query(self):
"""
        Testing whether creating nodes within an iterall iteration changes the results.
"""
for _i in range(100):
n = orm.Data()
n.store()
for idx, _item in enumerate(orm.QueryBuilder().append(orm.Node, project=['id',
'label']).iterall(batch_size=10)):
            if idx % 10 == 0:
                # store additional nodes while the iteration is still running
n = orm.Data()
n.store()
self.assertEqual(idx, 99) # pylint: disable=undefined-loop-variable
self.assertTrue(len(orm.QueryBuilder().append(orm.Node, project=['id', 'label']).all(batch_size=10)) > 99)
def test_len_results(self):
"""
Test whether the len of results matches the count returned.
See also https://github.com/aiidateam/aiida_core/issues/1600
        SQLAlchemy has a deduplication strategy that leads to strange behavior, which is tested against here.
"""
parent = orm.CalculationNode().store()
# adding 5 links going out:
for inode in range(5):
output_node = orm.Data().store()
output_node.add_incoming(parent, link_type=LinkType.CREATE, link_label='link_{}'.format(inode))
for projection in ('id', '*'):
qb = orm.QueryBuilder()
qb.append(orm.CalculationNode, filters={'id': parent.id}, tag='parent', project=projection)
qb.append(orm.Data, with_incoming='parent')
self.assertEqual(len(qb.all()), qb.count())
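# Creation-statistics queries of the backend query manager; the expected values
# are built incrementally so the tests do not depend on pre-existing database content.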
class TestManager(AiidaTestCase):
def test_statistics(self):
"""
Test if the statistics query works properly.
I try to implement it in a way that does not depend on the past state.
"""
from collections import defaultdict
# pylint: disable=protected-access
def store_and_add(n, statistics):
n.store()
statistics['total'] += 1
statistics['types'][n._plugin_type_string] += 1 # pylint: disable=no-member
statistics['ctime_by_day'][n.ctime.strftime('%Y-%m-%d')] += 1
qmanager = self.backend.query_manager
current_db_statistics = qmanager.get_creation_statistics()
types = defaultdict(int)
types.update(current_db_statistics['types'])
ctime_by_day = defaultdict(int)
ctime_by_day.update(current_db_statistics['ctime_by_day'])
expected_db_statistics = {'total': current_db_statistics['total'], 'types': types, 'ctime_by_day': ctime_by_day}
store_and_add(orm.Data(), expected_db_statistics)
store_and_add(orm.Dict(), expected_db_statistics)
store_and_add(orm.Dict(), expected_db_statistics)
store_and_add(orm.CalculationNode(), expected_db_statistics)
new_db_statistics = qmanager.get_creation_statistics()
# I only check a few fields
new_db_statistics = {k: v for k, v in new_db_statistics.items() if k in expected_db_statistics}
expected_db_statistics = {
k: dict(v) if isinstance(v, defaultdict) else v for k, v in expected_db_statistics.items()
}
self.assertEqual(new_db_statistics, expected_db_statistics)
def test_statistics_default_class(self):
"""
Test if the statistics query works properly.
I try to implement it in a way that does not depend on the past state.
"""
from collections import defaultdict
def store_and_add(n, statistics):
n.store()
statistics['total'] += 1
statistics['types'][n._plugin_type_string] += 1 # pylint: disable=no-member,protected-access
statistics['ctime_by_day'][n.ctime.strftime('%Y-%m-%d')] += 1
current_db_statistics = self.backend.query_manager.get_creation_statistics()
types = defaultdict(int)
types.update(current_db_statistics['types'])
ctime_by_day = defaultdict(int)
ctime_by_day.update(current_db_statistics['ctime_by_day'])
expected_db_statistics = {'total': current_db_statistics['total'], 'types': types, 'ctime_by_day': ctime_by_day}
store_and_add(orm.Data(), expected_db_statistics)
store_and_add(orm.Dict(), expected_db_statistics)
store_and_add(orm.Dict(), expected_db_statistics)
store_and_add(orm.CalculationNode(), expected_db_statistics)
new_db_statistics = self.backend.query_manager.get_creation_statistics()
# I only check a few fields
new_db_statistics = {k: v for k, v in new_db_statistics.items() if k in expected_db_statistics}
expected_db_statistics = {
k: dict(v) if isinstance(v, defaultdict) else v for k, v in expected_db_statistics.items()
}
self.assertEqual(new_db_statistics, expected_db_statistics)
[
"aiida.orm.Group",
"collections.defaultdict",
"aiida.orm.Int",
"aiida.common.timezone.now",
"unittest.skipIf",
"warnings.simplefilter",
"six.moves.range",
"aiida.engine.if_",
"warnings.catch_warnings",
"datetime.timedelta",
"aiida.orm.Data",
"aiida.orm.Dict",
"aiida.orm.CalculationNode",
"aiida.orm.User",
"aiida.plugins.CalculationFactory",
"aiida.orm.Bool",
"aiida.engine.ExitCode",
"aiida.orm.QueryBuilder",
"aiida.engine.return_",
"aiida.orm.StructureData"
]